pydaptivefiltering
# pydaptivefiltering/__init__.py

from .base import AdaptiveFilter
from .lms import *
from .rls import *
from .set_membership import *
from .lattice import *
from .fast_rls import *
from .qr_decomposition import *
from .iir import *
from .nonlinear import *
from .subband import *
from .blind import *
from .kalman import *

__version__ = "1.0.0"
__author__ = "BruninLima"

__all__ = ["AdaptiveFilter",
           "LMS", "NLMS", "AffineProjection", "SignData", "SignError", "DualSign",
           "LMSNewton", "Power2ErrorLMS", "TDomainLMS", "TDomainDCT", "TDomainDFT",
           "RLS", "RLSAlt",
           "SMNLMS", "SMBNLMS", "SMAffineProjection", "SimplifiedSMPUAP", "SimplifiedSMAP",
           "LRLSPosteriori", "LRLSErrorFeedback", "LRLSPriori", "NormalizedLRLS",
           "FastRLS", "StabFastRLS",
           "QRRLS",
           "ErrorEquation", "GaussNewton", "GaussNewtonGradient", "RLSIIR", "SteiglitzMcBride",
           "BilinearRLS", "ComplexRBF", "MultilayerPerceptron", "RBF", "VolterraLMS", "VolterraRLS",
           "CFDLMS", "DLCLLMS", "OLSBLMS",
           "AffineProjectionCM", "CMA", "Godard", "Sato",
           "Kalman",
           "info"]


def info():
    """Print an overview of the algorithm coverage of the library.

    Lists each chapter of Diniz's "Adaptive Filtering" together with the
    algorithms implemented for it, plus a short usage hint.
    """
    print("\n" + "="*70)
    print(" PyDaptive Filtering - Complete Library Overview")
    print(" Reference: 'Adaptive Filtering' by Paulo S. R. Diniz")
    print("="*70)
    sections = {
        "Cap 3/4 (LMS)": "LMS, NLMS, Affine Projection, Sign Algorithms, Transform Domain",
        "Cap 5 (RLS)": "Standard RLS, Alternative RLS",
        "Cap 6 (Set-Membership)": "SM-NLMS, BNLMS, SM-AP, Simplified AP/PUAP",
        "Cap 7 (Lattice RLS)": "LRLS (Posteriori, Priori, Error Feedback), NLRLS",
        "Cap 8 (Fast RLS)": "Fast Transversal RLS, Stabilized FTRLS",
        "Cap 9 (QR)": "QR-Decomposition Based RLS",
        # Fixed typo: "Steinglitz-McBride" -> "Steiglitz-McBride", matching
        # the exported SteiglitzMcBride class name in __all__.
        "Cap 10 (IIR)": "Error Equation, Gauss-Newton, Steiglitz-McBride, RLS-IIR",
        "Cap 11 (Nonlinear)": "Volterra (LMS/RLS), MLP, RBF, Bilinear RLS",
        "Cap 12 (Subband)": "CFDLMS, DLCLLMS, OLSBLMS",
        "Cap 13 (Blind)": "CMA, Godard, Sato, Blind Affine Projection",
        "Cap 17 (Kalman)": "Kalman Filter",
    }
    for cap, algs in sections.items():
        print(f"\n{cap:25}: {algs}")

    print("\n" + "-"*70)
    print("Usage example: from pydaptivefiltering import LMS")
    print("Documentation: help(pydaptivefiltering.LMS)")
    print("="*70 + "\n")
class AdaptiveFilter(ABC):
    """Abstract base class shared by all adaptive filters.

    Parameters
    ----------
    filter_order:
        Filter order in the FIR sense (number of taps minus one). Non-FIR
        subclasses may treat it as a generic size hint for base allocation.
    w_init:
        Optional initial coefficient vector; zeros are used when omitted.

    Notes
    -----
    - Subclasses supporting complex-valued data should set
      `supports_complex = True`.
    - Subclasses that want coefficient trajectories are expected to call
      `_record_history()` once per iteration (or use the helper methods).
    """

    supports_complex: bool = False

    def __init__(self, filter_order: int, w_init: Optional[ArrayLike] = None) -> None:
        self.filter_order: int = int(filter_order)
        # Real filters use float state; complex-capable subclasses opt in.
        self._dtype = complex if self.supports_complex else float

        n_taps = self.filter_order + 1
        self.regressor: np.ndarray = np.zeros(n_taps, dtype=self._dtype)

        if w_init is None:
            self.w: np.ndarray = np.zeros(n_taps, dtype=self._dtype)
        else:
            self.w = np.asarray(w_init, dtype=self._dtype)

        self.w_history: List[np.ndarray] = []
        self._record_history()

    def _record_history(self) -> None:
        """Append a copy of the current coefficient vector to the history."""
        self.w_history.append(np.asarray(self.w).copy())

    def _final_coeffs(self, coefficients: Any) -> Any:
        """Extract the last coefficient snapshot from a history container.

        Accepts a non-empty list (returns its last element), a 2D array
        (returns its last row), or anything else (returned unchanged).
        """
        if coefficients is None:
            return None
        if isinstance(coefficients, list) and coefficients:
            return coefficients[-1]
        try:
            arr = np.asarray(coefficients)
            if arr.ndim == 2:
                return arr[-1, :]
        except Exception:
            # Not array-convertible: fall through and return as-is.
            pass
        return coefficients

    def _pack_results(
        self,
        outputs: np.ndarray,
        errors: np.ndarray,
        runtime_s: float,
        error_type: str = "a_priori",
        extra: Optional[Dict[str, Any]] = None,
    ) -> OptimizationResult:
        """Centralized output packaging so every algorithm returns the same shape."""
        return OptimizationResult(
            outputs=np.asarray(outputs),
            errors=np.asarray(errors),
            coefficients=np.asarray(self.w_history),
            algorithm=self.__class__.__name__,
            runtime_ms=float(runtime_s) * 1000.0,  # stored in milliseconds
            error_type=str(error_type),
            extra=extra,
        )

    def filter_signal(self, input_signal: ArrayLike) -> np.ndarray:
        """Filter an input signal with the current (frozen) coefficients.

        The default implementation assumes an FIR structure with taps
        `self.w`, regressor convention x_k = [x[k], x[k-1], ..., x[k-m]]
        (newest sample first), and output y[k] = w^H x_k (Hermitian dot
        product for complex filters).
        """
        signal = np.asarray(input_signal, dtype=self._dtype)
        n = signal.size
        taps = self.filter_order + 1
        out = np.zeros(n, dtype=self._dtype)

        # Zero-prepend so early samples see an all-zero past.
        padded = np.zeros(n + self.filter_order, dtype=self._dtype)
        padded[self.filter_order:] = signal

        for k in range(n):
            regressor = padded[k:k + taps][::-1]
            out[k] = np.dot(self.w.conj(), regressor)

        return out

    @classmethod
    def default_test_init_kwargs(cls, order: int) -> dict:
        """Override in subclasses to provide init kwargs for standardized tests."""
        return {}

    @abstractmethod
    def optimize(
        self,
        input_signal: ArrayLike,
        desired_signal: ArrayLike,
        **kwargs: Any,
    ) -> Any:
        """Run the adaptation procedure.

        Subclasses should return either:
        - OptimizationResult (recommended), or
        - dict-like with standardized keys, if you are migrating older code.
        """
        raise NotImplementedError

    def reset_filter(self, w_new: Optional[ArrayLike] = None) -> None:
        """Reset coefficients (to `w_new` or zeros) and restart the history."""
        if w_new is None:
            self.w = np.zeros(self.filter_order + 1, dtype=self._dtype)
        else:
            self.w = np.asarray(w_new, dtype=self._dtype)
        self.w_history = []
        self._record_history()
Abstract base class for all adaptive filters.
Parameters
filter_order: Order in the FIR sense (number of taps - 1). For non-FIR structures, it can be used as a generic size indicator for base allocation. w_init: Initial coefficient vector. If None, initialized to zeros.
Notes
- Subclasses should set `supports_complex = True` if they support complex-valued data.
- Subclasses are expected to call `_record_history()` every iteration (or use helper methods) if they want coefficient trajectories.
204 def filter_signal(self, input_signal: ArrayLike) -> np.ndarray: 205 """Filter an input signal using current coefficients. 206 207 Default implementation assumes an FIR structure with taps `self.w` and 208 regressor convention: 209 x_k = [x[k], x[k-1], ..., x[k-m]] 210 and output: 211 y[k] = w^H x_k (Hermitian for complex) 212 """ 213 x = np.asarray(input_signal, dtype=self._dtype) 214 n_samples = x.size 215 y = np.zeros(n_samples, dtype=self._dtype) 216 217 x_padded = np.zeros(n_samples + self.filter_order, dtype=self._dtype) 218 x_padded[self.filter_order:] = x 219 220 for k in range(n_samples): 221 x_k = x_padded[k : k + self.filter_order + 1][::-1] 222 y[k] = np.dot(self.w.conj(), x_k) 223 224 return y
Filter an input signal using current coefficients.
Default implementation assumes an FIR structure with taps self.w and
regressor convention:
x_k = [x[k], x[k-1], ..., x[k-m]]
and output:
y[k] = w^H x_k (Hermitian for complex)
226 @classmethod 227 def default_test_init_kwargs(cls, order: int) -> dict: 228 """Override in subclasses to provide init kwargs for standardized tests.""" 229 return {}
Override in subclasses to provide init kwargs for standardized tests.
231 @abstractmethod 232 def optimize( 233 self, 234 input_signal: ArrayLike, 235 desired_signal: ArrayLike, 236 **kwargs: Any, 237 ) -> Any: 238 """Run the adaptation procedure. 239 240 Subclasses should return either: 241 - OptimizationResult (recommended), or 242 - dict-like with standardized keys, if you are migrating older code. 243 """ 244 raise NotImplementedError
Run the adaptation procedure.
Subclasses should return either:
- OptimizationResult (recommended), or
- dict-like with standardized keys, if you are migrating older code.
246 def reset_filter(self, w_new: Optional[ArrayLike] = None) -> None: 247 """Reset coefficients and history.""" 248 if w_new is not None: 249 self.w = np.asarray(w_new, dtype=self._dtype) 250 else: 251 self.w = np.zeros(self.filter_order + 1, dtype=self._dtype) 252 self.w_history = [] 253 self._record_history()
Reset coefficients and history.
class LMS(AdaptiveFilter):
    """
    Complex Least-Mean Squares (LMS) adaptive FIR filter.

    Standard complex LMS stochastic-gradient algorithm driven by the
    instantaneous a priori error, following Diniz (Alg. 3.2).

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-2.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.

    Notes
    -----
    At iteration ``k``, with the regressor (newest sample first)

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1},

    the a priori output and error are

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k],

    and the coefficient update is

    .. math::
        w[k+1] = w[k] + \\mu\\, e^*[k] \\, x_k.

    This implementation uses complex arithmetic (``supports_complex=True``),
    returns the a priori error ``e[k]``, and records the coefficient history
    via the base class.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
        Implementation*, 5th ed., Algorithm 3.2.
    """

    supports_complex: bool = True

    step_size: float

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run the LMS adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.

        Returns
        -------
        OptimizationResult
            Result object with:
            - outputs : ndarray of complex, shape ``(N,)`` — ``y[k] = w^H[k] x_k``.
            - errors : ndarray of complex, shape ``(N,)`` — a priori
              ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of complex — history from the base class.
            - error_type : str — set to ``"a_priori"``.
        """
        start: float = perf_counter()

        x_in: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        d_in: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()

        n: int = int(x_in.size)
        order: int = int(self.filter_order)

        y_seq: np.ndarray = np.zeros(n, dtype=complex)
        e_seq: np.ndarray = np.zeros(n, dtype=complex)

        # Zero-prepend so early iterations see an all-zero past.
        padded: np.ndarray = np.zeros(n + order, dtype=complex)
        padded[order:] = x_in

        mu = self.step_size
        for k in range(n):
            # Regressor with newest sample first: [x[k], ..., x[k-M]].
            reg: np.ndarray = padded[k:k + order + 1][::-1]

            # vdot conjugates its first argument, giving y = w^H x.
            y: complex = complex(np.vdot(self.w, reg))
            e: complex = d_in[k] - y
            y_seq[k] = y
            e_seq[k] = e

            self.w = self.w + mu * np.conj(e) * reg
            self._record_history()

        elapsed: float = perf_counter() - start
        if verbose:
            print(f"[LMS] Completed in {elapsed * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_seq,
            errors=e_seq,
            runtime_s=elapsed,
            error_type="a_priori",
        )
Complex Least-Mean Squares (LMS) adaptive filter.
Standard complex LMS algorithm for adaptive FIR filtering, following Diniz (Alg. 3.2). The method performs a stochastic-gradient update using the instantaneous a priori error.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
At iteration k, form the regressor vector (newest sample first):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
The a priori output and error are
$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k],$$
and the LMS update is
$$w[k+1] = w[k] + \mu\, e^*[k] \, x_k.$$
This implementation:
- uses complex arithmetic (supports_complex=True),
- returns the a priori error e[k],
- records coefficient history via the base class.
References
86 @validate_input 87 def optimize( 88 self, 89 input_signal: np.ndarray, 90 desired_signal: np.ndarray, 91 verbose: bool = False, 92 ) -> OptimizationResult: 93 """ 94 Executes the LMS adaptation loop over paired input/desired sequences. 95 96 Parameters 97 ---------- 98 input_signal : array_like of complex 99 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 100 desired_signal : array_like of complex 101 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 102 verbose : bool, optional 103 If True, prints the total runtime after completion. 104 105 Returns 106 ------- 107 OptimizationResult 108 Result object with fields: 109 - outputs : ndarray of complex, shape ``(N,)`` 110 Scalar output sequence, ``y[k] = w^H[k] x_k``. 111 - errors : ndarray of complex, shape ``(N,)`` 112 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 113 - coefficients : ndarray of complex 114 Coefficient history recorded by the base class. 115 - error_type : str 116 Set to ``"a_priori"``. 
117 """ 118 tic: float = perf_counter() 119 120 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 121 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 122 123 n_samples: int = int(x.size) 124 m: int = int(self.filter_order) 125 126 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 127 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 128 129 x_padded: np.ndarray = np.zeros(n_samples + m, dtype=complex) 130 x_padded[m:] = x 131 132 for k in range(n_samples): 133 x_k: np.ndarray = x_padded[k : k + m + 1][::-1] 134 135 y_k: complex = complex(np.vdot(self.w, x_k)) 136 outputs[k] = y_k 137 138 e_k: complex = d[k] - y_k 139 errors[k] = e_k 140 141 self.w = self.w + self.step_size * np.conj(e_k) * x_k 142 143 self._record_history() 144 145 runtime_s: float = perf_counter() - tic 146 if verbose: 147 print(f"[LMS] Completed in {runtime_s * 1000:.03f} ms") 148 149 return self._pack_results( 150 outputs=outputs, 151 errors=errors, 152 runtime_s=runtime_s, 153 error_type="a_priori", 154 )
Executes the LMS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar output sequence, y[k] = w^H[k] x_k.
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
class NLMS(AdaptiveFilter):
    """
    Complex Normalized Least-Mean Squares (NLMS) adaptive FIR filter.

    Normalized LMS algorithm following Diniz (Alg. 4.3). The step size is
    normalized by the instantaneous regressor energy, improving stability
    and reducing sensitivity to input scaling.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
    step_size : float, optional
        Base adaptation step size ``mu``. Default is 1e-2.
    gamma : float, optional
        Regularization constant added to the normalization denominator to
        avoid division by (near-)zero regressor energy. Default is 1e-6.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.

    Notes
    -----
    At iteration ``k``, with the regressor (newest sample first)

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1},

    the a priori output and error are

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].

    With the instantaneous regressor energy :math:`\\|x_k\\|^2 = x_k^H x_k`,
    the normalized step is

    .. math::
        \\mu_k = \\frac{\\mu}{\\|x_k\\|^2 + \\gamma},

    and the update is

    .. math::
        w[k+1] = w[k] + \\mu_k\\, e^*[k] \\, x_k.

    This implementation uses complex arithmetic (``supports_complex=True``),
    returns the a priori error ``e[k]``, and records the coefficient history
    via the base class.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
        Implementation*, 5th ed., Algorithm 4.3.
    """

    supports_complex: bool = True

    step_size: float
    gamma: float

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        gamma: float = 1e-6,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)
        self.gamma = float(gamma)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run the NLMS adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.

        Returns
        -------
        OptimizationResult
            Result object with:
            - outputs : ndarray of complex, shape ``(N,)`` — ``y[k] = w^H[k] x_k``.
            - errors : ndarray of complex, shape ``(N,)`` — a priori
              ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of complex — history from the base class.
            - error_type : str — set to ``"a_priori"``.
        """
        start: float = perf_counter()

        x_in: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        d_in: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()

        n: int = int(x_in.size)
        order: int = int(self.filter_order)

        y_seq: np.ndarray = np.zeros(n, dtype=complex)
        e_seq: np.ndarray = np.zeros(n, dtype=complex)

        # Zero-prepend so early iterations see an all-zero past.
        padded: np.ndarray = np.zeros(n + order, dtype=complex)
        padded[order:] = x_in

        for k in range(n):
            # Regressor with newest sample first: [x[k], ..., x[k-M]].
            reg: np.ndarray = padded[k:k + order + 1][::-1]

            # vdot conjugates its first argument, giving y = w^H x.
            y: complex = complex(np.vdot(self.w, reg))
            e: complex = d_in[k] - y
            y_seq[k] = y
            e_seq[k] = e

            # Energy normalization; gamma guards against near-zero energy.
            energy: float = float(np.vdot(reg, reg).real)
            mu_k: float = self.step_size / (energy + self.gamma)

            self.w = self.w + mu_k * np.conj(e) * reg
            self._record_history()

        elapsed: float = perf_counter() - start
        if verbose:
            print(f"[NLMS] Completed in {elapsed * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_seq,
            errors=e_seq,
            runtime_s=elapsed,
            error_type="a_priori",
        )
Complex Normalized Least-Mean Squares (NLMS) adaptive filter.
Normalized LMS algorithm for adaptive FIR filtering, following Diniz (Alg. 4.3). The method normalizes the step size by the instantaneous regressor energy to improve stability and reduce sensitivity to input scaling.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
step_size : float, optional
Base adaptation step size mu. Default is 1e-2.
gamma : float, optional
Regularization constant gamma used in the normalization denominator
to avoid division by zero (or near-zero regressor energy). Default is 1e-6.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
At iteration k, form the regressor vector (newest sample first):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
The a priori output and error are
$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$
Define the instantaneous regressor energy
$$\|x_k\|^2 = x_k^H x_k,$$
and the normalized step size
$$\mu_k = \frac{\mu}{\|x_k\|^2 + \gamma}.$$
The NLMS update is then
$$w[k+1] = w[k] + \mu_k\, e^*[k] \, x_k.$$
This implementation:
- uses complex arithmetic (supports_complex=True),
- returns the a priori error e[k],
- records coefficient history via the base class.
References
103 @validate_input 104 def optimize( 105 self, 106 input_signal: np.ndarray, 107 desired_signal: np.ndarray, 108 verbose: bool = False, 109 ) -> OptimizationResult: 110 """ 111 Executes the NLMS adaptation loop over paired input/desired sequences. 112 113 Parameters 114 ---------- 115 input_signal : array_like of complex 116 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 117 desired_signal : array_like of complex 118 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 119 verbose : bool, optional 120 If True, prints the total runtime after completion. 121 122 Returns 123 ------- 124 OptimizationResult 125 Result object with fields: 126 - outputs : ndarray of complex, shape ``(N,)`` 127 Scalar output sequence, ``y[k] = w^H[k] x_k``. 128 - errors : ndarray of complex, shape ``(N,)`` 129 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 130 - coefficients : ndarray of complex 131 Coefficient history recorded by the base class. 132 - error_type : str 133 Set to ``"a_priori"``. 
134 """ 135 tic: float = perf_counter() 136 137 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 138 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 139 140 n_samples: int = int(x.size) 141 m: int = int(self.filter_order) 142 143 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 144 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 145 146 x_padded: np.ndarray = np.zeros(n_samples + m, dtype=complex) 147 x_padded[m:] = x 148 149 for k in range(n_samples): 150 x_k: np.ndarray = x_padded[k : k + m + 1][::-1] 151 152 y_k: complex = complex(np.vdot(self.w, x_k)) 153 outputs[k] = y_k 154 155 e_k: complex = d[k] - y_k 156 errors[k] = e_k 157 158 norm_xk: float = float(np.vdot(x_k, x_k).real) 159 mu_k: float = self.step_size / (norm_xk + self.gamma) 160 161 self.w = self.w + mu_k * np.conj(e_k) * x_k 162 163 self._record_history() 164 165 runtime_s: float = perf_counter() - tic 166 if verbose: 167 print(f"[NLMS] Completed in {runtime_s * 1000:.03f} ms") 168 169 return self._pack_results( 170 outputs=outputs, 171 errors=errors, 172 runtime_s=runtime_s, 173 error_type="a_priori", 174 )
Executes the NLMS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar output sequence, y[k] = w^H[k] x_k.
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
class AffineProjection(AdaptiveFilter):
    """
    Complex Affine-Projection Algorithm (APA) adaptive FIR filter.

    LMS-family algorithm that reuses the ``L + 1`` most recent regressors to
    accelerate convergence relative to LMS/NLMS, following Diniz (Alg. 4.6).
    Every iteration solves a small regularized ``(L+1) x (L+1)`` linear system.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
    step_size : float, optional
        Adaptation step size (relaxation factor) ``mu``. Default is 1e-2.
    gamma : float, optional
        Diagonal loading (regularization) applied to the projection
        correlation matrix for numerical stability. Default is 1e-6.
    L : int, optional
        Reuse factor (projection order); the ``L + 1`` most recent regressors
        are kept. Default is 2.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.

    Notes
    -----
    Let ``X(k)`` be the ``(L+1) x (M+1)`` matrix whose rows are the most
    recent regressors (newest at row 0) and ``d_vec(k)`` the matching stack
    of desired samples (``d[k]`` at index 0). The projection output and
    error vectors are

    .. math::
        y_{vec}(k) = X(k)\\,w^*(k), \\qquad
        e_{vec}(k) = d_{vec}(k) - y_{vec}(k).

    The direction ``u(k)`` solves the regularized system

    .. math::
        (X(k)X^H(k) + \\gamma I_{L+1})\\,u(k) = e_{vec}(k),

    and the coefficient update (matching this code's ``y = w^H x``
    convention) is

    .. math::
        w(k+1) = w(k) + \\mu\\, X^T(k)\\,u^*(k).

    Only the most recent scalar components are returned:
    ``y[k] = y_vec(k)[0]`` and ``e[k] = e_vec(k)[0]``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
        Implementation*, 5th ed., Algorithm 4.6.
    """

    supports_complex: bool = True

    step_size: float
    gamma: float
    memory_length: int

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        gamma: float = 1e-6,
        L: int = 2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)
        self.gamma = float(gamma)
        self.memory_length = int(L)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the Affine Projection adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes the last internal states in ``result.extra``:
            ``"last_regressor_matrix"`` (``X(k)``) and
            ``"last_correlation_matrix"`` (``X(k)X^H(k) + gamma I``).

        Returns
        -------
        OptimizationResult
            Result object with:
            - outputs : ndarray of complex, shape ``(N,)`` — ``y[k] = y_vec(k)[0]``.
            - errors : ndarray of complex, shape ``(N,)`` — a priori
              ``e[k] = e_vec(k)[0]``.
            - coefficients : ndarray of complex — history from the base class.
            - error_type : str — set to ``"a_priori"``.
            - extra : dict, optional — only if ``return_internal_states=True``.
        """
        start: float = perf_counter()

        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n: int = int(x_in.size)
        order: int = int(self.filter_order)
        reuse: int = int(self.memory_length)

        y_seq: np.ndarray = np.zeros(n, dtype=complex)
        e_seq: np.ndarray = np.zeros(n, dtype=complex)

        # Zero-prepend so early iterations see an all-zero past.
        padded: np.ndarray = np.zeros(n + order, dtype=complex)
        padded[order:] = x_in

        X_hist: np.ndarray = np.zeros((reuse + 1, order + 1), dtype=complex)
        d_hist: np.ndarray = np.zeros(reuse + 1, dtype=complex)
        identity: np.ndarray = np.eye(reuse + 1, dtype=complex)

        last_corr: Optional[np.ndarray] = None

        for k in range(n):
            # Shift the history one slot; the newest entries live at index 0.
            X_hist = np.roll(X_hist, 1, axis=0)
            X_hist[0] = padded[k:k + order + 1][::-1]
            d_hist = np.roll(d_hist, 1)
            d_hist[0] = d_in[k]

            y_vec: np.ndarray = X_hist @ self.w.conj()
            e_vec: np.ndarray = d_hist - y_vec

            y_seq[k] = y_vec[0]
            e_seq[k] = e_vec[0]

            # Regularized projection correlation matrix (diagonal loading).
            corr: np.ndarray = (X_hist @ X_hist.conj().T) + (self.gamma * identity)
            last_corr = corr

            try:
                direction: np.ndarray = np.linalg.solve(corr, e_vec)
            except np.linalg.LinAlgError:
                # Singular despite loading: fall back to the pseudo-inverse.
                direction = np.linalg.pinv(corr) @ e_vec

            self.w = self.w + self.step_size * (X_hist.T @ direction.conj())
            self._record_history()

        elapsed: float = perf_counter() - start
        if verbose:
            print(f"[AffineProjection] Completed in {elapsed * 1000:.02f} ms")

        extra = None
        if return_internal_states:
            extra = {
                "last_regressor_matrix": X_hist.copy(),
                "last_correlation_matrix": None if last_corr is None else last_corr.copy(),
            }

        return self._pack_results(
            outputs=y_seq,
            errors=e_seq,
            runtime_s=elapsed,
            error_type="a_priori",
            extra=extra,
        )
Complex Affine-Projection Algorithm (APA) adaptive filter.
Affine-projection LMS-type algorithm that reuses the last L+1 regressor
vectors to accelerate convergence relative to LMS/NLMS, following Diniz
(Alg. 4.6). Per iteration, the method solves a small linear system of size
(L+1) x (L+1).
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
step_size : float, optional
Adaptation step size (relaxation factor) mu. Default is 1e-2.
gamma : float, optional
Diagonal loading (regularization) gamma applied to the projection
correlation matrix for numerical stability. Default is 1e-6.
L : int, optional
Reuse factor (projection order). The algorithm uses L + 1 most recent
regressors. Default is 2.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
At iteration k, form the projection matrix and desired vector:
- `X(k) ∈ C^{(L+1) x (M+1)}`, whose rows are regressor vectors, with the most recent regressor at row 0.
- `d_vec(k) ∈ C^{L+1}`, stacking the most recent desired samples, with `d[k]` at index 0.
The projection output and error vectors are:
$$y_{vec}(k) = X(k)\,w^*(k) \in \mathbb{C}^{L+1},$$
$$e_{vec}(k) = d_{vec}(k) - y_{vec}(k).$$
The update direction u(k) is obtained by solving the regularized system:
$$(X(k)X^H(k) + \gamma I_{L+1})\,u(k) = e_{vec}(k),$$
and the coefficient update is:
$$w(k+1) = w(k) + \mu X^H(k)\,u(k).$$
This implementation returns only the most recent scalar components:
y[k] = y_vec(k)[0] and e[k] = e_vec(k)[0].
References
def __init__(
    self,
    filter_order: int,
    step_size: float = 1e-2,
    gamma: float = 1e-6,
    L: int = 2,
    w_init: Optional[ArrayLike] = None,
) -> None:
    """
    Initializes the Affine Projection filter.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M`` (``M + 1`` coefficients).
    step_size : float, optional
        Adaptation step size (relaxation factor) ``mu``. Default is 1e-2.
    gamma : float, optional
        Diagonal-loading regularization added to the projection correlation
        matrix for numerical stability. Default is 1e-6.
    L : int, optional
        Reuse factor (projection order); the algorithm reuses ``L + 1``
        regressors. Default is 2.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``; zeros
        if None.
    """
    super().__init__(filter_order=int(filter_order), w_init=w_init)
    self.step_size = float(step_size)
    self.gamma = float(gamma)
    # Stored under ``memory_length``; the optimize loop reads L + 1 rows.
    self.memory_length = int(L)
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the Affine Projection adaptation loop over paired input/desired sequences.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
    desired_signal : array_like of complex
        Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes the last internal states in ``result.extra``:
        ``"last_regressor_matrix"`` (``X(k)``) and
        ``"last_correlation_matrix"`` (``X(k)X^H(k) + gamma I``).

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of complex, shape ``(N,)``
          Scalar output sequence, ``y[k] = y_vec(k)[0]``.
        - errors : ndarray of complex, shape ``(N,)``
          Scalar a priori error sequence, ``e[k] = e_vec(k)[0]``.
        - coefficients : ndarray of complex
          Coefficient history recorded by the base class.
        - error_type : str
          Set to ``"a_priori"``.
        - extra : dict, optional
          Present only if ``return_internal_states=True``.
    """
    tic: float = perf_counter()

    # Work entirely in complex arithmetic (the filter supports complex data).
    dtype = complex
    x = np.asarray(input_signal, dtype=dtype).ravel()
    d = np.asarray(desired_signal, dtype=dtype).ravel()

    n_samples: int = int(x.size)
    m: int = int(self.filter_order)
    L: int = int(self.memory_length)

    outputs: np.ndarray = np.zeros(n_samples, dtype=dtype)
    errors: np.ndarray = np.zeros(n_samples, dtype=dtype)

    # Zero prefix of length m implements the x[k] = 0 for k < 0 convention.
    x_padded: np.ndarray = np.zeros(n_samples + m, dtype=dtype)
    x_padded[m:] = x

    # Row i of X_matrix holds the regressor of lag i (row 0 = most recent).
    X_matrix: np.ndarray = np.zeros((L + 1, m + 1), dtype=dtype)
    D_vector: np.ndarray = np.zeros(L + 1, dtype=dtype)

    last_corr: Optional[np.ndarray] = None

    # Hoisted identity used for the diagonal loading each iteration.
    eye_L: np.ndarray = np.eye(L + 1, dtype=dtype)

    for k in range(n_samples):
        # Slide the regressor window down one row and place the newest
        # regressor (newest sample first) at row 0.
        X_matrix[1:] = X_matrix[:-1]
        X_matrix[0] = x_padded[k : k + m + 1][::-1]

        D_vector[1:] = D_vector[:-1]
        D_vector[0] = d[k]

        # y_i = w^H x_i expressed as X @ w*; entry 0 is the current output.
        Y_vector: np.ndarray = X_matrix @ self.w.conj()
        E_vector: np.ndarray = D_vector - Y_vector

        outputs[k] = Y_vector[0]
        errors[k] = E_vector[0]

        # Regularized (L+1)x(L+1) projection correlation matrix.
        corr_matrix: np.ndarray = (X_matrix @ X_matrix.conj().T) + (self.gamma * eye_L)
        last_corr = corr_matrix

        try:
            u: np.ndarray = np.linalg.solve(corr_matrix, E_vector)
        except np.linalg.LinAlgError:
            # Fall back to the pseudo-inverse if the system is singular.
            u = np.linalg.pinv(corr_matrix) @ E_vector

        # Coefficient update. X.T @ u* is the conjugate-consistent counterpart
        # of the X^H u form, given the y = X w* output convention above: the
        # a posteriori error then contracts by roughly (1 - mu) per step.
        self.w = self.w + self.step_size * (X_matrix.T @ u.conj())
        self._record_history()

    runtime_s: float = perf_counter() - tic
    if verbose:
        print(f"[AffineProjection] Completed in {runtime_s * 1000:.02f} ms")

    extra = None
    if return_internal_states:
        extra = {
            "last_regressor_matrix": X_matrix.copy(),
            "last_correlation_matrix": None if last_corr is None else last_corr.copy(),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the Affine Projection adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal states in result.extra:
"last_regressor_matrix" (X(k)) and
"last_correlation_matrix" (X(k)X^H(k) + gamma I).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar output sequence, y[k] = y_vec(k)[0].
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = e_vec(k)[0].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True.
class SignData(AdaptiveFilter):
    """
    Complex Sign-Data LMS adaptive filter.

    Reduced-complexity LMS variant: the regressor vector in the coefficient
    update is replaced by its element-wise sign, trading convergence speed
    and/or steady-state misadjustment for fewer multiplications.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``; the filter has ``M + 1`` coefficients.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-2.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` of shape ``(M + 1,)``; zeros when
        omitted.

    Notes
    -----
    With the regressor ``x_k = [x[k], ..., x[k-M]]^T`` (newest first), the
    a priori quantities are ``y[k] = w^H[k] x_k`` and ``e[k] = d[k] - y[k]``,
    and the update implemented here is

    .. math::
        w[k+1] = w[k] + 2\\mu\\, e^*[k] \\, \\operatorname{sign}(x_k).

    ``numpy.sign`` is applied element-wise to the (possibly complex)
    regressor; ``sign(0) = 0``, so zero samples contribute a null term. The
    factor ``2`` matches the gradient convention used across this module.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 4.1 (sign-based LMS variants).
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Runs the Sign-Data LMS adaptation over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]``, flattened to shape ``(N,)``.
        desired_signal : array_like of complex
            Desired sequence ``d[k]``, flattened to shape ``(N,)``.
        verbose : bool, optional
            When True, prints the total runtime after completion.
        return_internal_states : bool, optional
            When True, ``result.extra`` carries ``"last_sign_regressor"``
            (``sign(x_k)`` of the final iteration).

        Returns
        -------
        OptimizationResult
            Holds ``outputs`` (``y[k] = w^H[k] x_k``), ``errors`` (a priori
            ``e[k] = d[k] - y[k]``), the coefficient history recorded by the
            base class, ``error_type="a_priori"``, and the optional ``extra``
            dict described above.
        """
        started = perf_counter()

        samples_in = np.asarray(input_signal, dtype=complex).ravel()
        samples_ref = np.asarray(desired_signal, dtype=complex).ravel()

        total = int(samples_in.size)
        order = int(self.filter_order)

        out_seq = np.zeros(total, dtype=complex)
        err_seq = np.zeros(total, dtype=complex)

        # Zero-prefix the input so regressors before k = order are defined.
        history = np.zeros(total + order, dtype=complex)
        history[order:] = samples_in

        newest_sign: Optional[np.ndarray] = None

        # Loop-invariant effective gain (2 * mu).
        gain = 2.0 * self.step_size
        for idx in range(total):
            # Regressor with the newest sample first.
            regressor = history[idx : idx + order + 1][::-1]

            # np.vdot conjugates its first operand, giving y = w^H x.
            out_k = complex(np.vdot(self.w, regressor))
            err_k = samples_ref[idx] - out_k
            out_seq[idx] = out_k
            err_seq[idx] = err_k

            newest_sign = np.sign(regressor)

            self.w = self.w + gain * np.conj(err_k) * newest_sign
            self._record_history()

        runtime_s = float(perf_counter() - started)
        if verbose:
            print(f"[SignData] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "last_sign_regressor": None if newest_sign is None else newest_sign.copy()
            }

        return self._pack_results(
            outputs=out_seq,
            errors=err_seq,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Complex Sign-Data LMS adaptive filter.
Low-complexity LMS variant in which the regressor vector is replaced by its element-wise sign. This reduces multiplications (since the update uses a ternary/sign regressor), at the expense of slower convergence and/or larger steady-state misadjustment in many scenarios.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
At iteration k, form the regressor vector (newest sample first):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
The a priori output and error are
$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$
Define the element-wise sign regressor \operatorname{sign}(x_k).
The update implemented here is
$$w[k+1] = w[k] + 2\mu\, e^*[k] \, \operatorname{sign}(x_k).$$
Implementation details
- For complex inputs, numpy.sign applies element-wise; under NumPy >= 2.0
it returns x/|x| for x != 0 (older NumPy releases returned the sign of the
real part instead) and 0 when x == 0.
- The factor 2 in the update matches the implementation in this
module (consistent with common LMS gradient conventions).
References
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the Sign-Data LMS adaptation loop over paired input/desired sequences.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
    desired_signal : array_like of complex
        Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes the last internal state in ``result.extra``:
        ``"last_sign_regressor"`` (``sign(x_k)``).

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of complex, shape ``(N,)``
          Scalar output sequence, ``y[k] = w^H[k] x_k``.
        - errors : ndarray of complex, shape ``(N,)``
          Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
        - coefficients : ndarray of complex
          Coefficient history recorded by the base class.
        - error_type : str
          Set to ``"a_priori"``.
        - extra : dict, optional
          Present only if ``return_internal_states=True``.
    """
    t0 = perf_counter()

    x = np.asarray(input_signal, dtype=complex).ravel()
    d = np.asarray(desired_signal, dtype=complex).ravel()

    n_samples = int(x.size)
    m = int(self.filter_order)

    outputs = np.zeros(n_samples, dtype=complex)
    errors = np.zeros(n_samples, dtype=complex)

    # Zero prefix implements the x[k] = 0 for k < 0 convention.
    x_padded = np.zeros(n_samples + m, dtype=complex)
    x_padded[m:] = x

    last_sign_xk: Optional[np.ndarray] = None

    for k in range(n_samples):
        # Regressor with the newest sample first.
        x_k = x_padded[k : k + m + 1][::-1]

        # a priori output y[k] = w^H x_k (vdot conjugates its first operand).
        y_k = complex(np.vdot(self.w, x_k))
        outputs[k] = y_k

        e_k = d[k] - y_k
        errors[k] = e_k

        # Element-wise sign of the regressor replaces x_k in the update.
        sign_xk = np.sign(x_k)
        last_sign_xk = sign_xk

        self.w = self.w + (2.0 * self.step_size) * np.conj(e_k) * sign_xk
        self._record_history()

    runtime_s = float(perf_counter() - t0)
    if verbose:
        print(f"[SignData] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {"last_sign_regressor": None if last_sign_xk is None else last_sign_xk.copy()}

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the Sign-Data LMS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal state in result.extra:
"last_sign_regressor" (sign(x_k)).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar output sequence, y[k] = w^H[k] x_k.
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True.
class SignError(AdaptiveFilter):
    """
    Sign-Error LMS adaptive filter (real-valued).

    Reduced-complexity LMS variant: the instantaneous error in the update is
    replaced by its sign. This saves multiplications and can add robustness
    against impulsive noise in some scenarios, at the cost of slower
    convergence and/or larger steady-state misadjustment.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``; the filter has ``M + 1`` coefficients.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-2.
    w_init : array_like of float, optional
        Initial coefficient vector ``w(0)`` of shape ``(M + 1,)``; zeros when
        omitted.

    Notes
    -----
    Real-valued signals only (``supports_complex=False``), enforced by the
    ``@ensure_real_signals`` decorator on :meth:`optimize`.

    With the regressor ``x_k = [x[k], ..., x[k-M]]^T`` (newest first), the
    a priori quantities are ``y[k] = w^T[k] x_k`` and ``e[k] = d[k] - y[k]``,
    and the update implemented here is

    .. math::
        w[k+1] = w[k] + \\mu\\, \\operatorname{sign}(e[k])\\, x_k.

    Since ``numpy.sign(0) = 0``, a zero error produces a null update.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 4.1 (sign-based LMS variants).
    """

    supports_complex: bool = False
    step_size: float

    def __init__(
        self,
        filter_order: int,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)

    @validate_input
    @ensure_real_signals
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Runs the Sign-Error LMS adaptation over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of float
            Input sequence ``x[k]``, flattened to shape ``(N,)``.
        desired_signal : array_like of float
            Desired sequence ``d[k]``, flattened to shape ``(N,)``.
        verbose : bool, optional
            When True, prints the total runtime after completion.
        return_internal_states : bool, optional
            When True, ``result.extra`` carries ``"last_sign_error"``
            (``sign(e[k])`` of the final iteration).

        Returns
        -------
        OptimizationResult
            Holds ``outputs`` (``y[k] = w^T[k] x_k``), ``errors`` (a priori
            ``e[k] = d[k] - y[k]``), the coefficient history recorded by the
            base class, ``error_type="a_priori"``, and the optional ``extra``
            dict described above.
        """
        started = perf_counter()

        samples_in = np.asarray(input_signal, dtype=np.float64).ravel()
        samples_ref = np.asarray(desired_signal, dtype=np.float64).ravel()

        total = int(samples_in.size)
        order = int(self.filter_order)

        out_seq = np.zeros(total, dtype=np.float64)
        err_seq = np.zeros(total, dtype=np.float64)

        # Zero-prefix the input so regressors before k = order are defined.
        history = np.zeros(total + order, dtype=np.float64)
        history[order:] = samples_in

        newest_sign: Optional[float] = None

        mu = self.step_size
        for idx in range(total):
            # Regressor with the newest sample first.
            regressor = history[idx : idx + order + 1][::-1]

            out_k = float(np.dot(self.w, regressor))
            err_k = float(samples_ref[idx] - out_k)
            out_seq[idx] = out_k
            err_seq[idx] = err_k

            newest_sign = float(np.sign(err_k))

            self.w = self.w + mu * newest_sign * regressor
            self._record_history()

        runtime_s = float(perf_counter() - started)
        if verbose:
            print(f"[SignError] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {"last_sign_error": newest_sign}

        return self._pack_results(
            outputs=out_seq,
            errors=err_seq,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Sign-Error LMS adaptive filter (real-valued).
Low-complexity LMS variant that replaces the instantaneous error by its sign. This reduces multiplications and can improve robustness under impulsive noise in some scenarios, at the expense of slower convergence and/or larger steady-state misadjustment.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of float, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
Real-valued only
This implementation is restricted to real-valued signals and coefficients
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
At iteration k, form the regressor vector (newest sample first):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{R}^{M+1}.$$
The a priori output and error are
$$y[k] = w^T[k] x_k, \qquad e[k] = d[k] - y[k].$$
The sign-error update implemented here is
$$w[k+1] = w[k] + \mu\, \operatorname{sign}(e[k])\, x_k.$$
Implementation details
- numpy.sign(0) = 0; therefore if e[k] == 0 the update is null.
References
@validate_input
@ensure_real_signals
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the Sign-Error LMS adaptation loop over paired input/desired sequences.

    Parameters
    ----------
    input_signal : array_like of float
        Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
    desired_signal : array_like of float
        Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes the last internal state in ``result.extra``:
        ``"last_sign_error"`` (``sign(e[k])``).

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of float, shape ``(N,)``
          Scalar output sequence, ``y[k] = w^T[k] x_k``.
        - errors : ndarray of float, shape ``(N,)``
          Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
        - coefficients : ndarray of float
          Coefficient history recorded by the base class.
        - error_type : str
          Set to ``"a_priori"``.
        - extra : dict, optional
          Present only if ``return_internal_states=True``.
    """
    t0 = perf_counter()

    x = np.asarray(input_signal, dtype=np.float64).ravel()
    d = np.asarray(desired_signal, dtype=np.float64).ravel()

    n_samples = int(x.size)
    m = int(self.filter_order)

    outputs = np.zeros(n_samples, dtype=np.float64)
    errors = np.zeros(n_samples, dtype=np.float64)

    # Zero prefix implements the x[k] = 0 for k < 0 convention.
    x_padded = np.zeros(n_samples + m, dtype=np.float64)
    x_padded[m:] = x

    last_sign_e: Optional[float] = None

    for k in range(n_samples):
        # Regressor with the newest sample first.
        x_k = x_padded[k : k + m + 1][::-1]

        y_k = float(np.dot(self.w, x_k))
        outputs[k] = y_k

        e_k = float(d[k] - y_k)
        errors[k] = e_k

        # Sign of the error replaces e[k] in the update (sign(0) = 0).
        s = float(np.sign(e_k))
        last_sign_e = s

        self.w = self.w + self.step_size * s * x_k
        self._record_history()

    runtime_s = float(perf_counter() - t0)
    if verbose:
        print(f"[SignError] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {"last_sign_error": last_sign_e}

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the Sign-Error LMS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of float
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal state in result.extra:
"last_sign_error" (sign(e[k])).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar output sequence, y[k] = w^T[k] x_k.
- errors : ndarray of float, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True.
class DualSign(AdaptiveFilter):
    """
    Dual-Sign LMS (DS-LMS) adaptive filter (real-valued).

    Low-complexity LMS variant that uses the sign of the instantaneous error
    together with a two-level (piecewise) effective gain selected by the
    error magnitude. This can reduce the number of multiplications and may
    improve robustness under impulsive noise in some scenarios, at the
    expense of larger steady-state misadjustment.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
    rho : float
        Threshold ``rho`` applied to ``|e[k]|`` to select the gain level.
    gamma : float
        Gain multiplier applied when ``|e[k]| > rho`` (typically ``gamma > 1``).
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-2.
    w_init : array_like of float, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.
    safe_eps : float, optional
        Small positive constant kept for API consistency across the library.
        (Not used by this implementation.) Default is 1e-12.

    Notes
    -----
    Real-valued only: enforced via ``@ensure_real_signals`` on
    :meth:`optimize` (``supports_complex=False``).

    With the regressor ``x_k = [x[k], ..., x[k-M]]^T``, output
    ``y[k] = w^T[k] x_k`` and a priori error ``e[k] = d[k] - y[k]``, the
    two-level signed term is

    .. math::
        u[k] =
        \\begin{cases}
        \\operatorname{sign}(e[k]), & |e[k]| \\le \\rho \\\\
        \\gamma\\,\\operatorname{sign}(e[k]), & |e[k]| > \\rho
        \\end{cases}

    and the update is

    .. math::
        w[k+1] = w[k] + 2\\mu\\,u[k]\\,x_k.

    Implementation details
    - ``numpy.sign(0) = 0``; therefore if ``e[k] == 0`` the update is null.
    - The factor ``2`` in the update matches the implementation in this
      module (consistent with common LMS gradient conventions).

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 4.1 (modified sign-based variant).
    """

    supports_complex: bool = False

    rho: float
    gamma: float
    step_size: float

    def __init__(
        self,
        filter_order: int,
        rho: float,
        gamma: float,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.rho = float(rho)
        self.gamma = float(gamma)
        self.step_size = float(step_size)
        # Kept for API symmetry with the other variants; not read here.
        self._safe_eps = float(safe_eps)

    @validate_input
    @ensure_real_signals
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the DS-LMS adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of float
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of float
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes the last internal state in ``result.extra``:
            ``"last_sign_term"`` (the two-level signed term ``u[k]`` of the
            final iteration). Added for API consistency with ``SignData`` and
            ``SignError``; the default ``False`` preserves prior behavior.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
              Scalar output sequence, ``y[k] = w^T[k] x_k``.
            - errors : ndarray of float, shape ``(N,)``
              Scalar a priori error sequence, ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of float
              Coefficient history recorded by the base class.
            - error_type : str
              Set to ``"a_priori"``.
            - extra : dict, optional
              Present only if ``return_internal_states=True``.
        """
        tic: float = perf_counter()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64).ravel()

        n_samples: int = int(x.size)
        m: int = int(self.filter_order)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        # Zero prefix implements the x[k] = 0 for k < 0 convention.
        x_padded: np.ndarray = np.zeros(n_samples + m, dtype=np.float64)
        x_padded[m:] = x

        last_sign_term: Optional[float] = None

        for k in range(n_samples):
            # Regressor with the newest sample first.
            x_k: np.ndarray = x_padded[k : k + m + 1][::-1]

            y_k: float = float(np.dot(self.w, x_k))
            outputs[k] = y_k

            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            # Two-level signed term: sign(e) below the threshold,
            # gamma * sign(e) above it.
            s: float = float(np.sign(e_k))
            if abs(e_k) > self.rho:
                s *= self.gamma
            last_sign_term = s

            self.w = self.w + (2.0 * self.step_size) * s * x_k
            self._record_history()

        runtime_s: float = perf_counter() - tic
        if verbose:
            print(f"[DualSign] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {"last_sign_term": last_sign_term}

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
152 """ 153 tic: float = perf_counter() 154 155 x: np.ndarray = np.asarray(input_signal, dtype=np.float64).ravel() 156 d: np.ndarray = np.asarray(desired_signal, dtype=np.float64).ravel() 157 158 n_samples: int = int(x.size) 159 m: int = int(self.filter_order) 160 161 outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64) 162 errors: np.ndarray = np.zeros(n_samples, dtype=np.float64) 163 164 x_padded: np.ndarray = np.zeros(n_samples + m, dtype=np.float64) 165 x_padded[m:] = x 166 167 for k in range(n_samples): 168 x_k: np.ndarray = x_padded[k : k + m + 1][::-1] 169 170 y_k: float = float(np.dot(self.w, x_k)) 171 outputs[k] = y_k 172 173 e_k: float = float(d[k] - y_k) 174 errors[k] = e_k 175 176 s: float = float(np.sign(e_k)) 177 if abs(e_k) > self.rho: 178 s *= self.gamma 179 180 self.w = self.w + (2.0 * self.step_size) * s * x_k 181 self._record_history() 182 183 runtime_s: float = perf_counter() - tic 184 if verbose: 185 print(f"[DualSign] Completed in {runtime_s * 1000:.03f} ms") 186 187 return self._pack_results( 188 outputs=outputs, 189 errors=errors, 190 runtime_s=runtime_s, 191 error_type="a_priori", 192 )
Dual-Sign LMS (DS-LMS) adaptive filter (real-valued).
Low-complexity LMS variant that uses the sign of the instantaneous error and a two-level (piecewise) effective gain selected by the error magnitude. This can reduce the number of multiplications and may improve robustness under impulsive noise in some scenarios, at the expense of larger steady-state misadjustment.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
rho : float
Threshold rho applied to |e[k]| to select the gain level.
gamma : float
Gain multiplier applied when |e[k]| > rho (typically gamma > 1).
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of float, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
safe_eps : float, optional
Small positive constant kept for API consistency across the library.
(Not used by this implementation.) Default is 1e-12.
Notes
Real-valued only
This implementation is restricted to real-valued signals and coefficients
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
Update rule (as implemented) Let the regressor vector be
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T,$$
with output and error
$$y[k] = w^T[k] x_k, \qquad e[k] = d[k] - y[k].$$
Define the two-level signed term
$$u[k] =
\begin{cases} \operatorname{sign}(e[k]), & |e[k]| \le \rho \\ \gamma\,\operatorname{sign}(e[k]), & |e[k]| > \rho \end{cases}$$
and update
$$w[k+1] = w[k] + 2\mu\,u[k]\,x_k.$$
Implementation details
- numpy.sign(0) = 0; therefore if e[k] == 0 the update is null.
- The factor 2 in the update matches the implementation in this
module (consistent with common LMS gradient conventions).
References
104 def __init__( 105 self, 106 filter_order: int, 107 rho: float, 108 gamma: float, 109 step_size: float = 1e-2, 110 w_init: Optional[ArrayLike] = None, 111 *, 112 safe_eps: float = 1e-12, 113 ) -> None: 114 super().__init__(filter_order=int(filter_order), w_init=w_init) 115 self.rho = float(rho) 116 self.gamma = float(gamma) 117 self.step_size = float(step_size) 118 self._safe_eps = float(safe_eps)
120 @validate_input 121 @ensure_real_signals 122 def optimize( 123 self, 124 input_signal: np.ndarray, 125 desired_signal: np.ndarray, 126 verbose: bool = False, 127 ) -> OptimizationResult: 128 """ 129 Executes the DS-LMS adaptation loop over paired input/desired sequences. 130 131 Parameters 132 ---------- 133 input_signal : array_like of float 134 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 135 desired_signal : array_like of float 136 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 137 verbose : bool, optional 138 If True, prints the total runtime after completion. 139 140 Returns 141 ------- 142 OptimizationResult 143 Result object with fields: 144 - outputs : ndarray of float, shape ``(N,)`` 145 Scalar output sequence, ``y[k] = w^T[k] x_k``. 146 - errors : ndarray of float, shape ``(N,)`` 147 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 148 - coefficients : ndarray of float 149 Coefficient history recorded by the base class. 150 - error_type : str 151 Set to ``"a_priori"``. 
152 """ 153 tic: float = perf_counter() 154 155 x: np.ndarray = np.asarray(input_signal, dtype=np.float64).ravel() 156 d: np.ndarray = np.asarray(desired_signal, dtype=np.float64).ravel() 157 158 n_samples: int = int(x.size) 159 m: int = int(self.filter_order) 160 161 outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64) 162 errors: np.ndarray = np.zeros(n_samples, dtype=np.float64) 163 164 x_padded: np.ndarray = np.zeros(n_samples + m, dtype=np.float64) 165 x_padded[m:] = x 166 167 for k in range(n_samples): 168 x_k: np.ndarray = x_padded[k : k + m + 1][::-1] 169 170 y_k: float = float(np.dot(self.w, x_k)) 171 outputs[k] = y_k 172 173 e_k: float = float(d[k] - y_k) 174 errors[k] = e_k 175 176 s: float = float(np.sign(e_k)) 177 if abs(e_k) > self.rho: 178 s *= self.gamma 179 180 self.w = self.w + (2.0 * self.step_size) * s * x_k 181 self._record_history() 182 183 runtime_s: float = perf_counter() - tic 184 if verbose: 185 print(f"[DualSign] Completed in {runtime_s * 1000:.03f} ms") 186 187 return self._pack_results( 188 outputs=outputs, 189 errors=errors, 190 runtime_s=runtime_s, 191 error_type="a_priori", 192 )
Executes the DS-LMS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of float
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar output sequence, y[k] = w^T[k] x_k.
- errors : ndarray of float, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
class LMSNewton(AdaptiveFilter):
    """
    Complex LMS-Newton adaptive filter.

    Accelerates plain complex LMS by premultiplying the instantaneous
    gradient with a running estimate ``P[k]`` of the inverse input
    autocorrelation matrix.  Convergence for strongly correlated inputs
    improves, at the cost of an ``(M+1) x (M+1)`` matrix update per sample.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M`` (``M + 1`` coefficients).
    forgetting_factor : float
        Forgetting factor of the inverse-correlation recursion, strictly
        inside ``(0, 1)``.  Values near 1 track more smoothly; smaller
        values adapt faster.
    initial_inv_rx : array_like of complex
        Initial inverse correlation matrix ``P(0)`` with shape
        ``(M + 1, M + 1)`` (e.g. a scaled identity ``delta^{-1} I``).
    step_size : float, optional
        Adaptation step size ``mu``. Default 1e-2.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)``; zeros when None.
    safe_eps : float, optional
        Guard added to near-zero denominators in the matrix recursion.
        Default 1e-12.

    Notes
    -----
    Complex-valued (``supports_complex=True``); the a priori output is
    ``y[k] = w^H[k] x_k``.

    Per sample, with regressor ``x_k`` (newest sample first):

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k],

    then with ``p_k = P[k] x_k`` and ``\\phi_k = x_k^H p_k``,

    .. math::
        P[k+1] = \\frac{1}{1-\\alpha}
                 \\left(P[k] - \\frac{p_k p_k^H}
                 {\\tfrac{1-\\alpha}{\\alpha} + \\phi_k}\\right),
        \\qquad
        w[k+1] = w[k] + \\mu\\, e^*[k]\\, p_k,

    where ``\\alpha`` is the forgetting factor.  Note that the weight
    update uses ``p_k = P[k] x_k`` computed *before* the matrix update.
    The ``P`` recursion is RLS-like, but the coefficient update remains
    LMS-like, governed by the step size ``mu``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 4.2.
    """

    supports_complex: bool = True

    forgetting_factor: float
    step_size: float
    inv_rx: np.ndarray

    def __init__(
        self,
        filter_order: int,
        forgetting_factor: float,
        initial_inv_rx: np.ndarray,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        # Forgetting factor must lie strictly inside (0, 1).
        self.forgetting_factor = float(forgetting_factor)
        if not (0.0 < self.forgetting_factor < 1.0):
            raise ValueError(f"forgetting_factor must satisfy 0 < forgetting_factor < 1. Got forgetting_factor={self.forgetting_factor}.")

        n_taps = int(filter_order) + 1
        P0 = np.asarray(initial_inv_rx, dtype=complex)
        if P0.shape != (n_taps, n_taps):
            raise ValueError(
                f"initial_inv_rx must have shape {(n_taps, n_taps)}. Got {P0.shape}."
            )
        self.inv_rx = P0

        self.step_size = float(step_size)
        self._safe_eps = float(safe_eps)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
    ) -> OptimizationResult:
        """
        Run the LMS-Newton adaptation loop over paired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]``; flattened to shape ``(N,)``.
        desired_signal : array_like of complex
            Desired sequence ``d[k]``; flattened to shape ``(N,)``.
        verbose : bool, optional
            When True, print the total runtime after completion.

        Returns
        -------
        OptimizationResult
            ``outputs`` (``y[k] = w^H[k] x_k``), ``errors`` (a priori
            ``e[k] = d[k] - y[k]``), the coefficient history recorded by
            the base class, and ``error_type="a_priori"``.
        """
        started = perf_counter()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        num = int(x.size)
        order = int(self.filter_order)

        y_hist = np.zeros(num, dtype=complex)
        e_hist = np.zeros(num, dtype=complex)

        # Zero-prepadded input so the first regressors are well defined.
        padded = np.zeros(num + order, dtype=complex)
        padded[order:] = x

        alpha = self.forgetting_factor
        ratio = (1.0 - alpha) / alpha

        for k in range(num):
            # Regressor with the newest sample first.
            x_k = padded[k : k + order + 1][::-1]

            y_k = complex(np.vdot(self.w, x_k))
            y_hist[k] = y_k

            e_k = d[k] - y_k
            e_hist[k] = e_k

            # p = P x and phi = x^H p, using 1-D vectors throughout.
            p = self.inv_rx @ x_k
            phi = complex(np.vdot(x_k, p))

            denom = ratio + phi
            if abs(denom) < self._safe_eps:
                # Guard against a vanishing denominator.
                denom = denom + (self._safe_eps + 0.0j)

            # Rank-1 downdate, then renormalization by 1/(1 - alpha).
            self.inv_rx = (self.inv_rx - np.outer(p, p.conj()) / denom) / (1.0 - alpha)

            # LMS-like weight update with the preconditioned regressor
            # p computed from the *previous* inverse-correlation matrix.
            self.w = self.w + self.step_size * np.conj(e_k) * p

            self._record_history()

        elapsed = perf_counter() - started
        if verbose:
            print(f"[LMSNewton] Completed in {elapsed * 1000:.03f} ms")

        return self._pack_results(
            outputs=y_hist,
            errors=e_hist,
            runtime_s=elapsed,
            error_type="a_priori",
        )
Complex LMS-Newton adaptive filter.
LMS-Newton accelerates the standard complex LMS by preconditioning the
instantaneous gradient with a recursive estimate of the inverse input
correlation matrix. This often improves convergence speed for strongly
correlated inputs, at the cost of maintaining and updating a full
(M+1) x (M+1) matrix per iteration.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
forgetting_factor : float
Forgetting factor alpha used in the inverse-correlation recursion,
with 0 < forgetting_factor < 1. Values closer to 1 yield smoother tracking; smaller
values adapt faster.
initial_inv_rx : array_like of complex
Initial inverse correlation matrix P(0) with shape (M + 1, M + 1).
Typical choices are scaled identities, e.g. delta^{-1} I.
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
safe_eps : float, optional
Small positive constant used to guard denominators in the matrix recursion.
Default is 1e-12.
Notes
Complex-valued
This implementation assumes complex arithmetic (supports_complex=True),
with the a priori output computed as y[k] = w^H[k] x_k.
Recursion (as implemented) Let the regressor vector be
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1},$$
and define the output and a priori error as
$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$
Maintain an estimate ``P[k] \approx R_x^{-1}`` using a normalized rank-1 update.
With
$$p_k = P[k] x_k, \qquad \phi_k = x_k^H p_k,$$
the denominator is
$$\mathrm{denom}_k = \frac{1-\alpha}{\alpha} + \phi_k,$$
where $\alpha$ denotes the forgetting factor, and the update used here is
$$P[k+1] =
\frac{1}{1-\alpha} \left( P[k] - \frac{p_k p_k^H}{\mathrm{denom}_k} \right).$$
The coefficient update uses the preconditioned regressor $p_k = P[k] x_k$, computed before the matrix update:
$$w[k+1] = w[k] + \mu\, e^*[k] \, p_k.$$
Relationship to RLS
The recursion for P is algebraically similar to an RLS covariance update
with a particular normalization; however, the coefficient update remains
LMS-like, controlled by the step size mu.
References
116 def __init__( 117 self, 118 filter_order: int, 119 forgetting_factor: float, 120 initial_inv_rx: np.ndarray, 121 step_size: float = 1e-2, 122 w_init: Optional[ArrayLike] = None, 123 *, 124 safe_eps: float = 1e-12, 125 ) -> None: 126 super().__init__(filter_order=int(filter_order), w_init=w_init) 127 128 self.forgetting_factor = float(forgetting_factor) 129 if not (0.0 < self.forgetting_factor < 1.0): 130 raise ValueError(f"forgetting_factor must satisfy 0 < forgetting_factor < 1. Got forgetting_factor={self.forgetting_factor}.") 131 132 P0 = np.asarray(initial_inv_rx, dtype=complex) 133 n_taps = int(filter_order) + 1 134 if P0.shape != (n_taps, n_taps): 135 raise ValueError( 136 f"initial_inv_rx must have shape {(n_taps, n_taps)}. Got {P0.shape}." 137 ) 138 self.inv_rx = P0 139 140 self.step_size = float(step_size) 141 self._safe_eps = float(safe_eps)
143 @validate_input 144 def optimize( 145 self, 146 input_signal: np.ndarray, 147 desired_signal: np.ndarray, 148 verbose: bool = False, 149 ) -> OptimizationResult: 150 """ 151 Executes the LMS-Newton adaptation loop over paired input/desired sequences. 152 153 Parameters 154 ---------- 155 input_signal : array_like of complex 156 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 157 desired_signal : array_like of complex 158 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 159 verbose : bool, optional 160 If True, prints the total runtime after completion. 161 162 Returns 163 ------- 164 OptimizationResult 165 Result object with fields: 166 - outputs : ndarray of complex, shape ``(N,)`` 167 Scalar output sequence, ``y[k] = w^H[k] x_k``. 168 - errors : ndarray of complex, shape ``(N,)`` 169 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 170 - coefficients : ndarray of complex 171 Coefficient history recorded by the base class. 172 - error_type : str 173 Set to ``"a_priori"``. 
174 """ 175 tic: float = perf_counter() 176 177 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 178 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 179 180 n_samples: int = int(x.size) 181 m: int = int(self.filter_order) 182 183 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 184 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 185 186 x_padded: np.ndarray = np.zeros(n_samples + m, dtype=complex) 187 x_padded[m:] = x 188 189 for k in range(n_samples): 190 x_k: np.ndarray = x_padded[k : k + m + 1][::-1] 191 192 y_k: complex = complex(np.vdot(self.w, x_k)) 193 outputs[k] = y_k 194 195 e_k: complex = d[k] - y_k 196 errors[k] = e_k 197 198 x_col: np.ndarray = x_k.reshape(-1, 1) 199 Px: np.ndarray = self.inv_rx @ x_col 200 phi: complex = (x_col.conj().T @ Px).item() 201 202 denom: complex = ((1.0 - self.forgetting_factor) / self.forgetting_factor) + phi 203 if abs(denom) < self._safe_eps: 204 denom = denom + (self._safe_eps + 0.0j) 205 206 self.inv_rx = (self.inv_rx - (Px @ Px.conj().T) / denom) / (1.0 - self.forgetting_factor) 207 208 self.w = self.w + self.step_size * np.conj(e_k) * Px.ravel() 209 210 self._record_history() 211 212 runtime_s: float = perf_counter() - tic 213 if verbose: 214 print(f"[LMSNewton] Completed in {runtime_s * 1000:.03f} ms") 215 216 return self._pack_results( 217 outputs=outputs, 218 errors=errors, 219 runtime_s=runtime_s, 220 error_type="a_priori", 221 )
Executes the LMS-Newton adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar output sequence, y[k] = w^H[k] x_k.
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
class Power2ErrorLMS(AdaptiveFilter):
    """
    Power-of-Two Error LMS adaptive filter (real-valued).

    Complexity-reduced LMS in which the a priori error is replaced, in
    the coefficient update, by a power-of-two quantized version with
    special handling of large and very small magnitudes.  Intended for
    fixed-point / low-cost implementations.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M`` (``M + 1`` coefficients).
    bd : int
        Word length (bits); defines the small-error threshold
        ``2^{-bd+1}``.
    tau : float
        Gain used when ``|e[k]|`` falls below ``2^{-bd+1}``.
    step_size : float, optional
        Adaptation step size ``mu``. Default 1e-2.
    w_init : array_like of float, optional
        Initial coefficient vector ``w(0)``; zeros when None.

    Notes
    -----
    Real-valued only (``supports_complex=False``), enforced through
    ``@ensure_real_signals`` on :meth:`optimize`.

    With regressor ``x_k`` (newest sample first), output
    ``y[k] = w^T[k] x_k`` and error ``e[k] = d[k] - y[k]``, the update is

    .. math::
        w[k+1] = w[k] + 2\\mu\\, q(e[k])\\, x_k,

    where, with ``\\epsilon = 2^{-bd+1}``,

    .. math::
        q(e) =
        \\begin{cases}
        \\operatorname{sign}(e), & |e| \\ge 1, \\\\
        \\tau\\,\\operatorname{sign}(e), & |e| < \\epsilon, \\\\
        2^{\\lfloor \\log_2 |e| \\rfloor}\\,\\operatorname{sign}(e),
        & \\text{otherwise.}
        \\end{cases}

    Since ``numpy.sign(0) = 0``, a zero error produces a null update.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 4.1 (complexity-reduced LMS
       variants).
    """

    supports_complex: bool = False

    def __init__(
        self,
        filter_order: int,
        bd: int,
        tau: float,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)
        self.step_size = float(step_size)
        self.tau = float(tau)
        self.bd = int(bd)

        if self.bd <= 0:
            raise ValueError(f"bd must be a positive integer. Got bd={self.bd}.")

    @validate_input
    @ensure_real_signals
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the Power-of-Two Error LMS adaptation loop.

        Parameters
        ----------
        input_signal : array_like of float
            Input sequence ``x[k]``; flattened to shape ``(N,)``.
        desired_signal : array_like of float
            Desired sequence ``d[k]``; flattened to shape ``(N,)``.
        verbose : bool, optional
            When True, print the total runtime after completion.
        return_internal_states : bool, optional
            When True, ``result.extra`` carries
            ``"last_quantized_error"`` (the final ``q(e[k])``) and
            ``"small_threshold"`` (``2^{-bd+1}``).

        Returns
        -------
        OptimizationResult
            ``outputs`` (``y[k] = w^T[k] x_k``), ``errors`` (a priori
            ``e[k] = d[k] - y[k]``), the coefficient history recorded by
            the base class, ``error_type="a_priori"``, and optionally
            ``extra`` as described above.
        """
        started = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        num = int(x.size)
        order = int(self.filter_order)

        y_hist = np.zeros(num, dtype=np.float64)
        e_hist = np.zeros(num, dtype=np.float64)

        # Zero-prepadded input so the first regressors are well defined.
        padded = np.zeros(num + order, dtype=np.float64)
        padded[order:] = x

        last_qe: Optional[float] = None
        threshold = 2.0 ** (-self.bd + 1)

        for k in range(num):
            x_k = padded[k : k + order + 1][::-1]

            y_k = float(np.dot(self.w, x_k))
            y_hist[k] = y_k

            e_k = float(d[k] - y_k)
            e_hist[k] = e_k

            # Three-way power-of-two quantizer on the error magnitude.
            mag = abs(e_k)
            sgn = float(np.sign(e_k))
            if mag >= 1.0:
                qe = sgn
            elif mag < threshold:
                qe = self.tau * sgn
            else:
                qe = float(2.0 ** np.floor(np.log2(mag))) * sgn

            last_qe = qe

            self.w = self.w + (2.0 * self.step_size) * qe * x_k
            self._record_history()

        elapsed = float(perf_counter() - started)
        if verbose:
            print(f"[Power2ErrorLMS] Completed in {elapsed * 1000:.03f} ms")

        internals: Optional[Dict[str, Any]] = None
        if return_internal_states:
            internals = {"last_quantized_error": last_qe, "small_threshold": float(threshold)}

        return self._pack_results(
            outputs=y_hist,
            errors=e_hist,
            runtime_s=elapsed,
            error_type="a_priori",
            extra=internals,
        )
Power-of-Two Error LMS adaptive filter (real-valued).
LMS variant in which the instantaneous a priori error is quantized to a power-of-two level (with special cases for large and very small errors), aiming to reduce computational complexity in fixed-point / low-cost implementations.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
bd : int
Word length (number of bits) used to define the small-error threshold
2^{-bd+1}.
tau : float
Gain factor applied when |e[k]| is very small (below 2^{-bd+1}).
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of float, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
Real-valued only
This implementation is restricted to real-valued signals and coefficients
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
Signal model and LMS update Let the regressor vector be
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{R}^{M+1},$$
with output and a priori error
$$y[k] = w^T[k] x_k, \qquad e[k] = d[k] - y[k].$$
The update uses a quantized error ``q(e[k])``:
$$w[k+1] = w[k] + 2\mu\, q(e[k])\, x_k.$$
Error quantization (as implemented) Define the small-error threshold
$$\epsilon = 2^{-bd+1}.$$
Then the quantizer is
$$q(e) =
\begin{cases} \operatorname{sign}(e), & |e| \ge 1, \\ \tau\,\operatorname{sign}(e), & |e| < \epsilon, \\ 2^{\lfloor \log_2(|e|) \rfloor}\,\operatorname{sign}(e), & \text{otherwise.} \end{cases}$$
Note that ``numpy.sign(0) = 0``; therefore if ``e[k] == 0`` then
``q(e[k]) = 0`` and the update is null.
References
103 def __init__( 104 self, 105 filter_order: int, 106 bd: int, 107 tau: float, 108 step_size: float = 1e-2, 109 w_init: Optional[ArrayLike] = None, 110 ) -> None: 111 super().__init__(filter_order=int(filter_order), w_init=w_init) 112 self.bd = int(bd) 113 self.tau = float(tau) 114 self.step_size = float(step_size) 115 116 if self.bd <= 0: 117 raise ValueError(f"bd must be a positive integer. Got bd={self.bd}.")
119 @validate_input 120 @ensure_real_signals 121 def optimize( 122 self, 123 input_signal: np.ndarray, 124 desired_signal: np.ndarray, 125 verbose: bool = False, 126 return_internal_states: bool = False, 127 ) -> OptimizationResult: 128 """ 129 Executes the Power-of-Two Error LMS adaptation loop over paired sequences. 130 131 Parameters 132 ---------- 133 input_signal : array_like of float 134 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 135 desired_signal : array_like of float 136 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 137 verbose : bool, optional 138 If True, prints the total runtime after completion. 139 return_internal_states : bool, optional 140 If True, includes the last internal states in ``result.extra``: 141 ``"last_quantized_error"`` (``q(e[k])``) and ``"small_threshold"`` 142 (``2^{-bd+1}``). 143 144 Returns 145 ------- 146 OptimizationResult 147 Result object with fields: 148 - outputs : ndarray of float, shape ``(N,)`` 149 Scalar output sequence, ``y[k] = w^T[k] x_k``. 150 - errors : ndarray of float, shape ``(N,)`` 151 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 152 - coefficients : ndarray of float 153 Coefficient history recorded by the base class. 154 - error_type : str 155 Set to ``"a_priori"``. 156 - extra : dict, optional 157 Present only if ``return_internal_states=True``. 
158 """ 159 t0 = perf_counter() 160 161 x = np.asarray(input_signal, dtype=np.float64).ravel() 162 d = np.asarray(desired_signal, dtype=np.float64).ravel() 163 164 n_samples = int(x.size) 165 m = int(self.filter_order) 166 167 outputs = np.zeros(n_samples, dtype=np.float64) 168 errors = np.zeros(n_samples, dtype=np.float64) 169 170 x_padded = np.zeros(n_samples + m, dtype=np.float64) 171 x_padded[m:] = x 172 173 last_qe: Optional[float] = None 174 small_thr = 2.0 ** (-self.bd + 1) 175 176 for k in range(n_samples): 177 x_k = x_padded[k : k + m + 1][::-1] 178 179 y_k = float(np.dot(self.w, x_k)) 180 outputs[k] = y_k 181 182 e_k = float(d[k] - y_k) 183 errors[k] = e_k 184 185 abs_error = abs(e_k) 186 if abs_error >= 1.0: 187 qe = float(np.sign(e_k)) 188 elif abs_error < small_thr: 189 qe = float(self.tau * np.sign(e_k)) 190 else: 191 qe = float((2.0 ** np.floor(np.log2(abs_error))) * np.sign(e_k)) 192 193 last_qe = qe 194 195 self.w = self.w + (2.0 * self.step_size) * qe * x_k 196 self._record_history() 197 198 runtime_s = float(perf_counter() - t0) 199 if verbose: 200 print(f"[Power2ErrorLMS] Completed in {runtime_s * 1000:.03f} ms") 201 202 extra: Optional[Dict[str, Any]] = None 203 if return_internal_states: 204 extra = {"last_quantized_error": last_qe, "small_threshold": float(small_thr)} 205 206 return self._pack_results( 207 outputs=outputs, 208 errors=errors, 209 runtime_s=runtime_s, 210 error_type="a_priori", 211 extra=extra, 212 )
Executes the Power-of-Two Error LMS adaptation loop over paired sequences.
Parameters
input_signal : array_like of float
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal states in result.extra:
"last_quantized_error" (q(e[k])) and "small_threshold"
(2^{-bd+1}).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar output sequence, y[k] = w^T[k] x_k.
- errors : ndarray of float, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True.
class TDomainLMS(AdaptiveFilter):
    """
    Transform-Domain LMS driven by a user-supplied transform matrix.

    Generic transform-domain LMS (Diniz, Alg. 4.4): each iteration maps
    the time-domain regressor through ``T``, adapts transform-domain
    weights with per-bin power normalization, and maps the weights back
    to the time domain.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``; the transform must be
        ``(M + 1, M + 1)``.
    gamma : float
        Regularizer added to the per-bin power in the normalization
        denominator.
    alpha : float
        Smoothing factor of the per-bin power estimate, typically close
        to 1.
    initial_power : float
        Initial power estimate assigned to every transform bin.
    transform_matrix : array_like of complex
        Transform ``T`` with shape ``(M + 1, M + 1)``, typically unitary
        (``T^H T = I``).
    step_size : float, optional
        Adaptation step size ``mu``. Default 1e-2.
    w_init : array_like of complex, optional
        Initial **time-domain** coefficient vector ``w(0)``; zeros when
        None.
    assume_unitary : bool, optional
        True (default): recover ``w = T^H w_T`` (fast).  False: use the
        pseudo-inverse mapping ``w = pinv(T)^H w_T`` (slower, but valid
        for non-unitary ``T``).

    Notes
    -----
    With regressor ``x_k`` (newest sample first) and ``z_k = T x_k``:

    .. math::
        y[k] = w_T^H[k] z_k, \\qquad e[k] = d[k] - y[k],

    .. math::
        p[k] = \\alpha\\,|z_k|^2 + (1 - \\alpha)\\,p[k-1],

    .. math::
        w_T[k+1] = w_T[k]
                 + \\mu\\, e^*[k]\\, \\frac{z_k}{\\gamma + p[k]},

    with element-wise division.  ``OptimizationResult.coefficients``
    stores the **time-domain** history; the transform-domain history is
    available through ``return_internal_states=True``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 4.4.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        gamma: float,
        alpha: float,
        initial_power: float,
        transform_matrix: np.ndarray,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
        *,
        assume_unitary: bool = True,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.gamma = float(gamma)
        self.alpha = float(alpha)
        self.step_size = float(step_size)

        # Number of taps / transform size.
        self.N = int(self.filter_order + 1)

        T = np.asarray(transform_matrix, dtype=complex)
        if T.shape != (self.N, self.N):
            raise ValueError(f"transform_matrix must have shape {(self.N, self.N)}. Got {T.shape}.")

        self.T = T
        self._assume_unitary = bool(assume_unitary)

        # Transform-domain weights, seeded from the time-domain w.
        self.w_T = self.T @ np.asarray(self.w, dtype=complex)

        # Smoothed per-bin power estimate.
        self.power_vector = np.full(self.N, float(initial_power), dtype=float)

        # Optional transform-domain coefficient history.
        self._w_history_T: List[np.ndarray] = [self.w_T.copy()]

    def _to_time_domain(self, w_T: np.ndarray) -> np.ndarray:
        """Map transform-domain weights back to the time domain."""
        if self._assume_unitary:
            return self.T.conj().T @ w_T
        # Pseudo-inverse path for non-unitary transforms (more costly).
        return np.linalg.pinv(self.T).conj().T @ w_T

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the Transform-Domain LMS adaptation loop.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]``; flattened to shape ``(N,)``.
        desired_signal : array_like of complex
            Desired sequence ``d[k]``; flattened to shape ``(N,)``.
        verbose : bool, optional
            When True, print the total runtime after completion.
        return_internal_states : bool, optional
            When True, ``result.extra`` carries
            ``"coefficients_transform"`` (transform-domain history),
            ``"power_vector_last"`` (final per-bin power),
            ``"transform_matrix"`` (the ``T`` used), and
            ``"assume_unitary"``.

        Returns
        -------
        OptimizationResult
            ``outputs`` (``y[k] = w_T^H[k] z_k``), ``errors`` (a priori
            ``e[k] = d[k] - y[k]``), the **time-domain** coefficient
            history recorded by the base class,
            ``error_type="a_priori"``, and optionally ``extra`` as
            described above.
        """
        started = perf_counter()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        # NOTE: the iteration count follows the desired signal length.
        num = int(d.size)
        order = int(self.filter_order)

        y_hist = np.zeros(num, dtype=complex)
        e_hist = np.zeros(num, dtype=complex)

        # Zero-prepadded input so the first regressors are well defined.
        padded = np.zeros(num + order, dtype=complex)
        padded[order:] = x

        w_T_track: List[np.ndarray] = [self.w_T.copy()]

        for k in range(num):
            x_k = padded[k : k + order + 1][::-1]
            z_k = self.T @ x_k

            # Smoothed per-bin power estimate (element-wise |z_k|^2).
            self.power_vector = (
                self.alpha * np.real(z_k * np.conj(z_k))
                + (1.0 - self.alpha) * self.power_vector
            )

            y_k = complex(np.vdot(self.w_T, z_k))
            y_hist[k] = y_k

            e_k = d[k] - y_k
            e_hist[k] = e_k

            # Normalized transform-domain update (element-wise division).
            self.w_T = self.w_T + self.step_size * np.conj(e_k) * (
                z_k / (self.gamma + self.power_vector)
            )

            # Keep the time-domain coefficients in sync for the history.
            self.w = self._to_time_domain(self.w_T)

            self._record_history()
            w_T_track.append(self.w_T.copy())

        elapsed = float(perf_counter() - started)
        if verbose:
            print(f"[TDomainLMS] Completed in {elapsed * 1000:.03f} ms")

        internals: Optional[Dict[str, Any]] = None
        if return_internal_states:
            internals = {
                "coefficients_transform": np.asarray(w_T_track),
                "power_vector_last": self.power_vector.copy(),
                "transform_matrix": self.T.copy(),
                "assume_unitary": self._assume_unitary,
            }

        return self._pack_results(
            outputs=y_hist,
            errors=e_hist,
            runtime_s=elapsed,
            error_type="a_priori",
            extra=internals,
        )
Transform-Domain LMS with a user-provided transform matrix.
Generic transform-domain LMS algorithm (Diniz, Alg. 4.4) parameterized by a
transform matrix T. At each iteration, the time-domain regressor is
mapped to the transform domain, adaptation is performed with per-bin
normalization using a smoothed power estimate, and time-domain coefficients
are recovered from the transform-domain weights.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
The transform size must be (M + 1, M + 1).
gamma : float
Regularization factor gamma used in the per-bin normalization
denominator to avoid division by zero (or near-zero power).
alpha : float
Smoothing factor alpha for the transform-bin power estimate,
typically close to 1.
initial_power : float
Initial power estimate used to initialize all transform bins.
transform_matrix : array_like of complex
Transform matrix T with shape (M + 1, M + 1).
Typically unitary (T^H T = I).
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of complex, optional
Initial **time-domain** coefficient vector w(0) with shape (M + 1,).
If None, initializes with zeros.
assume_unitary : bool, optional
If True (default), maps transform-domain weights back to the time domain
using w = T^H w_T (fast). If False, uses a pseudo-inverse mapping
w = pinv(T)^H w_T (slower but works for non-unitary T).
Notes
At iteration k, form the time-domain regressor vector (newest sample first):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
Define the transform-domain regressor:
$$z_k = T x_k.$$
Adaptation is performed in the transform domain with weights w_T[k].
The a priori output and error are
$$y[k] = w_T^H[k] z_k, \qquad e[k] = d[k] - y[k].$$
A smoothed per-bin power estimate p[k] is updated as
$$p[k] = \alpha\,|z_k|^2 + (1-\alpha)\,p[k-1],$$
where |z_k|^2 is taken element-wise.
The normalized transform-domain LMS update used here is
$$w_T[k+1] = w_T[k] + \mu\, e^*[k] \, \frac{z_k}{\gamma + p[k]},$$
with element-wise division.
Mapping back to time domain
If T is unitary (T^H T = I), then the inverse mapping is
$$w[k] = T^H w_T[k].$$
If T is not unitary and assume_unitary=False, this implementation
inverts w_T = T w with the Moore-Penrose pseudo-inverse mapping (which
reduces to T^H, and hence to the unitary-case formula, when T is unitary):
$$w[k] = \operatorname{pinv}(T)\, w_T[k].$$
Implementation details
- OptimizationResult.coefficients stores the time-domain coefficient
history recorded by the base class (self.w after mapping back).
- If return_internal_states=True, the transform-domain coefficient history
is returned in result.extra["coefficients_transform"].
References
121 def __init__( 122 self, 123 filter_order: int, 124 gamma: float, 125 alpha: float, 126 initial_power: float, 127 transform_matrix: np.ndarray, 128 step_size: float = 1e-2, 129 w_init: Optional[ArrayLike] = None, 130 *, 131 assume_unitary: bool = True, 132 ) -> None: 133 super().__init__(filter_order=int(filter_order), w_init=w_init) 134 135 self.gamma = float(gamma) 136 self.alpha = float(alpha) 137 self.step_size = float(step_size) 138 139 self.N = int(self.filter_order + 1) 140 141 T = np.asarray(transform_matrix, dtype=complex) 142 if T.shape != (self.N, self.N): 143 raise ValueError(f"transform_matrix must have shape {(self.N, self.N)}. Got {T.shape}.") 144 145 self.T = T 146 self._assume_unitary = bool(assume_unitary) 147 148 # transform-domain weights (start from time-domain w) 149 self.w_T = self.T @ np.asarray(self.w, dtype=complex) 150 151 # power estimate per transform bin 152 self.power_vector = np.full(self.N, float(initial_power), dtype=float) 153 154 # optional transform-domain history 155 self._w_history_T: List[np.ndarray] = [self.w_T.copy()]
165 @validate_input 166 def optimize( 167 self, 168 input_signal: np.ndarray, 169 desired_signal: np.ndarray, 170 verbose: bool = False, 171 return_internal_states: bool = False, 172 ) -> OptimizationResult: 173 """ 174 Executes the Transform-Domain LMS adaptation loop. 175 176 Parameters 177 ---------- 178 input_signal : array_like of complex 179 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 180 desired_signal : array_like of complex 181 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 182 verbose : bool, optional 183 If True, prints the total runtime after completion. 184 return_internal_states : bool, optional 185 If True, includes transform-domain internal states in ``result.extra``: 186 ``"coefficients_transform"``, ``"power_vector_last"``, 187 ``"transform_matrix"``, and ``"assume_unitary"``. 188 189 Returns 190 ------- 191 OptimizationResult 192 Result object with fields: 193 - outputs : ndarray of complex, shape ``(N,)`` 194 Scalar a priori output sequence, ``y[k] = w_T^H[k] z_k``. 195 - errors : ndarray of complex, shape ``(N,)`` 196 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 197 - coefficients : ndarray of complex 198 **Time-domain** coefficient history recorded by the base class. 199 - error_type : str 200 Set to ``"a_priori"``. 201 - extra : dict, optional 202 Present only if ``return_internal_states=True`` with: 203 - ``coefficients_transform`` : ndarray of complex 204 Transform-domain coefficient history. 205 - ``power_vector_last`` : ndarray of float 206 Final per-bin power estimate ``p[k]``. 207 - ``transform_matrix`` : ndarray of complex 208 The transform matrix ``T`` used (shape ``(M+1, M+1)``). 209 - ``assume_unitary`` : bool 210 Whether the inverse mapping assumed ``T`` is unitary. 
211 """ 212 t0 = perf_counter() 213 214 x = np.asarray(input_signal, dtype=complex).ravel() 215 d = np.asarray(desired_signal, dtype=complex).ravel() 216 217 n_samples = int(d.size) 218 m = int(self.filter_order) 219 220 outputs = np.zeros(n_samples, dtype=complex) 221 errors = np.zeros(n_samples, dtype=complex) 222 223 x_padded = np.zeros(n_samples + m, dtype=complex) 224 x_padded[m:] = x 225 226 w_hist_T: List[np.ndarray] = [self.w_T.copy()] 227 228 for k in range(n_samples): 229 x_k = x_padded[k : k + m + 1][::-1] 230 z_k = self.T @ x_k 231 232 self.power_vector = ( 233 self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector 234 ) 235 236 y_k = complex(np.vdot(self.w_T, z_k)) 237 outputs[k] = y_k 238 239 e_k = d[k] - y_k 240 errors[k] = e_k 241 242 denom = self.gamma + self.power_vector 243 self.w_T = self.w_T + self.step_size * np.conj(e_k) * (z_k / denom) 244 245 self.w = self._to_time_domain(self.w_T) 246 247 self._record_history() 248 w_hist_T.append(self.w_T.copy()) 249 250 runtime_s = float(perf_counter() - t0) 251 if verbose: 252 print(f"[TDomainLMS] Completed in {runtime_s * 1000:.03f} ms") 253 254 extra: Optional[Dict[str, Any]] = None 255 if return_internal_states: 256 extra = { 257 "coefficients_transform": np.asarray(w_hist_T), 258 "power_vector_last": self.power_vector.copy(), 259 "transform_matrix": self.T.copy(), 260 "assume_unitary": self._assume_unitary, 261 } 262 263 return self._pack_results( 264 outputs=outputs, 265 errors=errors, 266 runtime_s=runtime_s, 267 error_type="a_priori", 268 extra=extra, 269 )
Executes the Transform-Domain LMS adaptation loop.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes transform-domain internal states in result.extra:
"coefficients_transform", "power_vector_last",
"transform_matrix", and "assume_unitary".
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar a priori output sequence, y[k] = w_T^H[k] z_k.
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Time-domain coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True with:
- coefficients_transform : ndarray of complex
Transform-domain coefficient history.
- power_vector_last : ndarray of float
Final per-bin power estimate p[k].
- transform_matrix : ndarray of complex
The transform matrix T used (shape (M+1, M+1)).
- assume_unitary : bool
Whether the inverse mapping assumed T is unitary.
class TDomainDCT(AdaptiveFilter):
    """
    Transform-Domain LMS using an orthonormal DCT (complex-valued signals).

    Implements the transform-domain LMS algorithm (Diniz, Alg. 4.4). At each
    iteration the time-domain regressor (newest sample first),

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1},

    is projected onto an orthonormal DCT basis, ``z_k = T x_k``, where ``T``
    is real with ``T^T T = I``. Adaptation runs on the transform-domain
    weights ``w_z[k]``:

    .. math::
        y[k] = w_z^H[k] z_k, \\qquad e[k] = d[k] - y[k],

    .. math::
        p[k] = \\alpha\\,|z_k|^2 + (1-\\alpha)\\,p[k-1],

    .. math::
        w_z[k+1] = w_z[k] + \\mu\\, e^*[k] \\, \\frac{z_k}{\\gamma + p[k]},

    with element-wise squared magnitude and division. Time-domain
    coefficients are recovered through orthonormality, ``w[k] = T^T w_z[k]``.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``; the filter has ``M + 1`` coefficients.
    gamma : float
        Regularization constant added to the per-bin power in the
        normalization denominator.
    alpha : float
        Smoothing factor of the per-bin power estimate, typically near 1.
    initial_power : float
        Value used to initialize the power estimate of every bin.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-2.
    w_init : array_like of complex, optional
        Initial time-domain coefficients ``w(0)`` of shape ``(M + 1,)``;
        zeros when omitted.

    Notes
    -----
    - ``OptimizationResult.coefficients`` holds the **time-domain**
      coefficient history recorded by the base class.
    - With ``return_internal_states=True``, the transform-domain history is
      available in ``result.extra["coefficients_dct"]``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
           Implementation*, 5th ed., Algorithm 4.4.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        gamma: float,
        alpha: float,
        initial_power: float,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """Configure the DCT-domain LMS filter and its internal state."""
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        # Scalar hyper-parameters.
        self.gamma = float(gamma)
        self.alpha = float(alpha)
        self.step_size = float(step_size)

        # Transform size equals the number of coefficients.
        self.N = int(self.filter_order + 1)

        # Orthonormal DCT matrix built by transforming the identity columns.
        self.T = dct(np.eye(self.N), norm="ortho", axis=0)

        # DCT-domain image of the time-domain weights.
        self.w_dct = self.T @ np.asarray(self.w, dtype=complex)

        # One smoothed power estimate per DCT bin.
        self.power_vector = np.full(self.N, float(initial_power), dtype=float)

        # DCT-domain coefficient history (diagnostics only).
        self._w_history_dct: List[np.ndarray] = [self.w_dct.copy()]

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """Run the Transform-Domain LMS (DCT) adaptation over the signals.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]``; flattened to one dimension.
        desired_signal : array_like of complex
            Desired sequence ``d[k]``; flattened to one dimension.
        verbose : bool, optional
            When True, the total runtime is printed at the end.
        return_internal_states : bool, optional
            When True, ``result.extra`` carries ``"coefficients_dct"``,
            ``"power_vector_last"`` and ``"dct_matrix"``.

        Returns
        -------
        OptimizationResult
            Scalar a priori outputs and errors (``error_type="a_priori"``),
            plus the time-domain coefficient history recorded by the base
            class and, optionally, the DCT-domain internals above.
        """
        start = perf_counter()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d.size)
        order = int(self.filter_order)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        # Zero-prepend so that early regressors see zeros for x[k < 0].
        padded = np.zeros(n_samples + order, dtype=complex)
        padded[order:] = x

        dct_history: List[np.ndarray] = [self.w_dct.copy()]

        for k in range(n_samples):
            # Regressor (newest sample first) and its DCT-domain image.
            regressor = padded[k : k + order + 1][::-1]
            z = self.T @ regressor

            # Exponentially smoothed per-bin power.
            self.power_vector = (
                self.alpha * np.real(z * np.conj(z))
                + (1.0 - self.alpha) * self.power_vector
            )

            y = complex(np.vdot(self.w_dct, z))
            outputs[k] = y

            err = d[k] - y
            errors[k] = err

            # Normalized LMS step in the DCT domain.
            self.w_dct = self.w_dct + self.step_size * np.conj(err) * (
                z / (self.gamma + self.power_vector)
            )

            # Inverse transform via orthonormality: w = T^T w_z.
            self.w = self.T.T @ self.w_dct

            self._record_history()
            dct_history.append(self.w_dct.copy())

        runtime_s = float(perf_counter() - start)
        if verbose:
            print(f"[TDomainDCT] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "coefficients_dct": np.asarray(dct_history),
                "power_vector_last": self.power_vector.copy(),
                "dct_matrix": self.T.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Transform-Domain LMS using an orthonormal DCT (for complex-valued signals; the DCT matrix itself is real).
Transform-domain LMS algorithm (Diniz, Alg. 4.4) in which the time-domain regressor vector is mapped to a decorrelated transform domain using an orthonormal Discrete Cosine Transform (DCT). Adaptation is performed in the transform domain with per-bin normalization based on a smoothed power estimate. The time-domain coefficient vector is recovered from the transform-domain weights.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
gamma : float
Regularization factor gamma used in the per-bin normalization
denominator to avoid division by zero (or near-zero power).
alpha : float
Smoothing factor alpha for the transform-bin power estimate,
typically close to 1.
initial_power : float
Initial power estimate used to initialize all transform bins.
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of complex, optional
Initial time-domain coefficient vector w(0) with shape (M + 1,).
If None, initializes with zeros.
Notes
At iteration k, form the time-domain regressor vector (newest sample first):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
Let T be the orthonormal DCT matrix of size (M+1) x (M+1)
(real-valued, with T^T T = I). The transform-domain regressor is
$$z_k = T x_k.$$
Adaptation is performed in the transform domain with weights w_z[k].
The a priori output and error are
$$y[k] = w_z^H[k] z_k, \qquad e[k] = d[k] - y[k].$$
A smoothed per-bin power estimate p[k] is updated as
$$p[k] = \alpha\,|z_k|^2 + (1-\alpha)\,p[k-1],$$
where |z_k|^2 is taken element-wise (i.e., |z_{k,i}|^2).
The normalized transform-domain LMS update used here is
$$w_z[k+1] = w_z[k] + \mu\, e^*[k] \, \frac{z_k}{\gamma + p[k]},$$
where the division is element-wise.
The time-domain coefficients are recovered using orthonormality of T:
$$w[k] = T^T w_z[k].$$
Implementation details
- OptimizationResult.coefficients stores the time-domain coefficient
history recorded by the base class (self.w after the inverse transform).
- If return_internal_states=True, the transform-domain coefficient history
is returned in result.extra["coefficients_dct"].
References
109 def __init__( 110 self, 111 filter_order: int, 112 gamma: float, 113 alpha: float, 114 initial_power: float, 115 step_size: float = 1e-2, 116 w_init: Optional[ArrayLike] = None, 117 ) -> None: 118 super().__init__(filter_order=int(filter_order), w_init=w_init) 119 120 self.gamma = float(gamma) 121 self.alpha = float(alpha) 122 self.step_size = float(step_size) 123 124 self.N = int(self.filter_order + 1) 125 126 self.T = dct(np.eye(self.N), norm="ortho", axis=0) 127 128 self.w_dct = self.T @ np.asarray(self.w, dtype=complex) 129 130 self.power_vector = np.full(self.N, float(initial_power), dtype=float) 131 132 self._w_history_dct: List[np.ndarray] = [self.w_dct.copy()]
134 @validate_input 135 def optimize( 136 self, 137 input_signal: np.ndarray, 138 desired_signal: np.ndarray, 139 verbose: bool = False, 140 return_internal_states: bool = False, 141 ) -> OptimizationResult: 142 """ 143 Executes the Transform-Domain LMS (DCT) adaptation loop. 144 145 Parameters 146 ---------- 147 input_signal : array_like of complex 148 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 149 desired_signal : array_like of complex 150 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 151 verbose : bool, optional 152 If True, prints the total runtime after completion. 153 return_internal_states : bool, optional 154 If True, includes transform-domain internal states in ``result.extra``: 155 ``"coefficients_dct"``, ``"power_vector_last"``, and ``"dct_matrix"``. 156 157 Returns 158 ------- 159 OptimizationResult 160 Result object with fields: 161 - outputs : ndarray of complex, shape ``(N,)`` 162 Scalar a priori output sequence, ``y[k] = w_z^H[k] z_k``. 163 - errors : ndarray of complex, shape ``(N,)`` 164 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 165 - coefficients : ndarray of complex 166 **Time-domain** coefficient history recorded by the base class. 167 - error_type : str 168 Set to ``"a_priori"``. 169 - extra : dict, optional 170 Present only if ``return_internal_states=True`` with: 171 - ``coefficients_dct`` : ndarray of complex 172 Transform-domain coefficient history. 173 - ``power_vector_last`` : ndarray of float 174 Final per-bin power estimate ``p[k]``. 175 - ``dct_matrix`` : ndarray of float 176 The DCT matrix ``T`` used (shape ``(M+1, M+1)``). 
177 """ 178 t0 = perf_counter() 179 180 x = np.asarray(input_signal, dtype=complex).ravel() 181 d = np.asarray(desired_signal, dtype=complex).ravel() 182 183 n_samples = int(d.size) 184 m = int(self.filter_order) 185 186 outputs = np.zeros(n_samples, dtype=complex) 187 errors = np.zeros(n_samples, dtype=complex) 188 189 x_padded = np.zeros(n_samples + m, dtype=complex) 190 x_padded[m:] = x 191 192 w_hist_dct: List[np.ndarray] = [self.w_dct.copy()] 193 194 for k in range(n_samples): 195 x_k = x_padded[k : k + m + 1][::-1] 196 z_k = self.T @ x_k 197 198 self.power_vector = ( 199 self.alpha * np.real(z_k * np.conj(z_k)) + (1.0 - self.alpha) * self.power_vector 200 ) 201 202 y_k = complex(np.vdot(self.w_dct, z_k)) 203 outputs[k] = y_k 204 205 e_k = d[k] - y_k 206 errors[k] = e_k 207 208 denom = self.gamma + self.power_vector 209 self.w_dct = self.w_dct + self.step_size * np.conj(e_k) * (z_k / denom) 210 211 self.w = self.T.T @ self.w_dct 212 213 self._record_history() 214 w_hist_dct.append(self.w_dct.copy()) 215 216 runtime_s = float(perf_counter() - t0) 217 if verbose: 218 print(f"[TDomainDCT] Completed in {runtime_s * 1000:.03f} ms") 219 220 extra: Optional[Dict[str, Any]] = None 221 if return_internal_states: 222 extra = { 223 "coefficients_dct": np.asarray(w_hist_dct), 224 "power_vector_last": self.power_vector.copy(), 225 "dct_matrix": self.T.copy(), 226 } 227 228 return self._pack_results( 229 outputs=outputs, 230 errors=errors, 231 runtime_s=runtime_s, 232 error_type="a_priori", 233 extra=extra, 234 )
Executes the Transform-Domain LMS (DCT) adaptation loop.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes transform-domain internal states in result.extra:
"coefficients_dct", "power_vector_last", and "dct_matrix".
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar a priori output sequence, y[k] = w_z^H[k] z_k.
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Time-domain coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True with:
- coefficients_dct : ndarray of complex
Transform-domain coefficient history.
- power_vector_last : ndarray of float
Final per-bin power estimate p[k].
- dct_matrix : ndarray of float
The DCT matrix T used (shape (M+1, M+1)).
class TDomainDFT(AdaptiveFilter):
    """
    Transform-Domain LMS using a unitary DFT (complex-valued).

    Implements the transform-domain LMS algorithm (Diniz, Alg. 4.4). At each
    iteration the time-domain regressor (newest sample first),

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{N},

    is mapped to the frequency domain with the energy-preserving (unitary)
    DFT of size ``N = M + 1``:

    .. math::
        z_k = \\frac{\\mathrm{DFT}(x_k)}{\\sqrt{N}}.

    Adaptation runs on the transform-domain weights ``w_z[k]``:

    .. math::
        y[k] = w_z^H[k] z_k, \\qquad e[k] = d[k] - y[k],

    .. math::
        p[k] = \\alpha\\,|z_k|^2 + (1-\\alpha)\\,p[k-1],

    .. math::
        w_z[k+1] = w_z[k] + \\mu\\, e^*[k] \\, \\frac{z_k}{\\gamma + p[k]},

    with element-wise squared magnitude and division. Time-domain
    coefficients are recovered with the inverse unitary DFT,
    ``w[k] = IDFT(w_z[k]) * sqrt(N)``.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``; the filter has ``M + 1`` coefficients and
        the DFT size is ``N = M + 1``.
    gamma : float
        Regularization constant added to the per-bin power in the
        normalization denominator.
    alpha : float
        Smoothing factor of the per-bin power estimate, typically near 1.
    initial_power : float
        Value used to initialize the power estimate of every bin.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-2.
    w_init : array_like of complex, optional
        Initial time-domain coefficients ``w(0)`` of shape ``(M + 1,)``;
        zeros when omitted.

    Notes
    -----
    - ``OptimizationResult.coefficients`` holds the **time-domain**
      coefficient history recorded by the base class.
    - With ``return_internal_states=True``, the transform-domain history is
      available in ``result.extra["coefficients_dft"]``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
           Implementation*, 5th ed., Algorithm 4.4.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        gamma: float,
        alpha: float,
        initial_power: float,
        step_size: float = 1e-2,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """Configure the DFT-domain LMS filter and its internal state."""
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        # Scalar hyper-parameters.
        self.gamma = float(gamma)
        self.alpha = float(alpha)
        self.step_size = float(step_size)

        # DFT size and the unitary normalization factor sqrt(N).
        self.N = int(self.filter_order + 1)
        self._sqrtN = float(np.sqrt(self.N))

        # Unitary-DFT image of the time-domain weights.
        self.w_dft = fft(np.asarray(self.w, dtype=complex)) / self._sqrtN

        # One smoothed power estimate per frequency bin.
        self.power_vector = np.full(self.N, float(initial_power), dtype=float)

        # DFT-domain coefficient history (diagnostics only).
        self._w_history_dft: List[np.ndarray] = [self.w_dft.copy()]

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """Run the Transform-Domain LMS (DFT) adaptation over the signals.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]``; flattened to one dimension.
        desired_signal : array_like of complex
            Desired sequence ``d[k]``; flattened to one dimension.
        verbose : bool, optional
            When True, the total runtime is printed at the end.
        return_internal_states : bool, optional
            When True, ``result.extra`` carries ``"coefficients_dft"``,
            ``"power_vector_last"`` and ``"sqrtN"``.

        Returns
        -------
        OptimizationResult
            Scalar a priori outputs and errors (``error_type="a_priori"``),
            plus the time-domain coefficient history recorded by the base
            class and, optionally, the DFT-domain internals above.
        """
        start = perf_counter()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d.size)
        order = int(self.filter_order)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        # Zero-prepend so that early regressors see zeros for x[k < 0].
        padded = np.zeros(n_samples + order, dtype=complex)
        padded[order:] = x

        dft_history: List[np.ndarray] = [self.w_dft.copy()]

        for k in range(n_samples):
            # Regressor (newest sample first) and its unitary-DFT image.
            regressor = padded[k : k + order + 1][::-1]
            z = fft(regressor) / self._sqrtN

            # Exponentially smoothed per-bin power.
            self.power_vector = (
                self.alpha * np.real(z * np.conj(z))
                + (1.0 - self.alpha) * self.power_vector
            )

            y = complex(np.vdot(self.w_dft, z))
            outputs[k] = y

            err = d[k] - y
            errors[k] = err

            # Normalized LMS step in the frequency domain.
            self.w_dft = self.w_dft + self.step_size * np.conj(err) * (
                z / (self.gamma + self.power_vector)
            )

            # Inverse unitary DFT back to the time domain.
            self.w = ifft(self.w_dft) * self._sqrtN

            self._record_history()
            dft_history.append(self.w_dft.copy())

        runtime_s = float(perf_counter() - start)
        if verbose:
            print(f"[TDomainDFT] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "coefficients_dft": np.asarray(dft_history),
                "power_vector_last": self.power_vector.copy(),
                "sqrtN": self._sqrtN,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Transform-Domain LMS using a unitary DFT (complex-valued).
Transform-domain LMS algorithm (Diniz, Alg. 4.4) in which the time-domain regressor is mapped to the frequency domain using a unitary Discrete Fourier Transform (DFT). Adaptation is performed in the transform domain with per-bin normalization based on a smoothed power estimate. The time-domain coefficient vector is recovered via the inverse unitary DFT.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
The DFT size is N = M + 1.
gamma : float
Regularization factor gamma used in the per-bin normalization
denominator to avoid division by zero (or near-zero power).
alpha : float
Smoothing factor alpha for the transform-bin power estimate,
typically close to 1.
initial_power : float
Initial power estimate used to initialize all transform bins.
step_size : float, optional
Adaptation step size mu. Default is 1e-2.
w_init : array_like of complex, optional
Initial time-domain coefficient vector w(0) with shape (M + 1,).
If None, initializes with zeros.
Notes
At iteration k, form the time-domain regressor vector (newest sample first):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{N}.$$
Define the unitary DFT (energy-preserving) transform-domain regressor:
$$z_k = \frac{\mathrm{DFT}(x_k)}{\sqrt{N}}.$$
Adaptation is performed in the transform domain with weights w_z[k].
The a priori output and error are
$$y[k] = w_z^H[k] z_k, \qquad e[k] = d[k] - y[k].$$
A smoothed per-bin power estimate p[k] is updated as
$$p[k] = \alpha\,|z_k|^2 + (1-\alpha)\,p[k-1],$$
where |z_k|^2 is taken element-wise.
The normalized transform-domain LMS update used here is
$$w_z[k+1] = w_z[k] + \mu\, e^*[k] \, \frac{z_k}{\gamma + p[k]},$$
with element-wise division.
The time-domain coefficients are recovered via the inverse unitary DFT:
$$w[k] = \mathrm{IDFT}(w_z[k])\,\sqrt{N}.$$
Implementation details
- OptimizationResult.coefficients stores the time-domain coefficient
history recorded by the base class (self.w after inverse transform).
- If return_internal_states=True, the transform-domain coefficient history
is returned in result.extra["coefficients_dft"].
References
def __init__(
    self,
    filter_order: int,
    gamma: float,
    alpha: float,
    initial_power: float,
    step_size: float = 1e-2,
    w_init: Optional[ArrayLike] = None,
) -> None:
    """
    Initializes the Transform-Domain LMS (DFT) adaptive filter.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``; the number of coefficients and the
        DFT size are both ``N = M + 1``.
    gamma : float
        Regularization factor added to the per-bin normalization denominator
        to avoid division by (near-)zero power.
    alpha : float
        Smoothing factor for the transform-bin power estimate, typically
        close to 1.
    initial_power : float
        Initial power estimate applied to every transform bin.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-2.
    w_init : array_like of complex, optional
        Initial time-domain coefficient vector ``w(0)`` with shape
        ``(M + 1,)``. If None, initializes with zeros.
    """
    super().__init__(filter_order=int(filter_order), w_init=w_init)

    # Scalar hyper-parameters stored as plain floats.
    self.gamma = float(gamma)
    self.alpha = float(alpha)
    self.step_size = float(step_size)

    # DFT size and the unitary normalization factor sqrt(N).
    self.N = int(self.filter_order + 1)
    self._sqrtN = float(np.sqrt(self.N))

    # Transform-domain coefficients: unitary DFT of the time-domain vector.
    self.w_dft = fft(np.asarray(self.w, dtype=complex)) / self._sqrtN

    # Smoothed per-bin power estimate, initialized uniformly.
    self.power_vector = np.full(self.N, float(initial_power), dtype=float)

    # History of transform-domain coefficients, seeded with w_dft(0).
    self._w_history_dft: List[np.ndarray] = [self.w_dft.copy()]
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the Transform-Domain LMS (DFT) adaptation loop.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
    desired_signal : array_like of complex
        Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes transform-domain internal states in
        ``result.extra``: ``"coefficients_dft"``, ``"power_vector_last"``,
        and ``"sqrtN"``.

    Returns
    -------
    OptimizationResult
        Result object whose ``outputs``/``errors`` hold the scalar a priori
        sequences ``y[k] = w_z^H[k] z_k`` and ``e[k] = d[k] - y[k]``,
        whose ``coefficients`` hold the **time-domain** coefficient history
        recorded by the base class, with ``error_type="a_priori"`` and, when
        ``return_internal_states=True``, an ``extra`` dict carrying the
        transform-domain coefficient history, the final per-bin power
        estimate and the unitary factor ``sqrt(N)``.
    """
    t0 = perf_counter()

    x = np.asarray(input_signal, dtype=complex).ravel()
    d = np.asarray(desired_signal, dtype=complex).ravel()

    n_samples = int(d.size)
    m = int(self.filter_order)

    outputs = np.zeros(n_samples, dtype=complex)
    errors = np.zeros(n_samples, dtype=complex)

    # Zero-prepend the input so the first regressors see x[k < 0] = 0.
    x_padded = np.zeros(n_samples + m, dtype=complex)
    x_padded[m:] = x

    w_hist_dft: List[np.ndarray] = [self.w_dft.copy()]

    one_minus_alpha = 1.0 - self.alpha
    for k in range(n_samples):
        # Time-domain regressor (newest sample first), then unitary DFT.
        regressor = x_padded[k : k + m + 1][::-1]
        z_k = fft(regressor) / self._sqrtN

        # Smoothed per-bin power: p <- alpha*|z|^2 + (1 - alpha)*p.
        self.power_vector = (
            self.alpha * np.real(z_k * np.conj(z_k))
            + one_minus_alpha * self.power_vector
        )

        # A priori output and error: y = w_z^H z, e = d - y.
        y_k = complex(np.vdot(self.w_dft, z_k))
        outputs[k] = y_k

        e_k = d[k] - y_k
        errors[k] = e_k

        # Normalized transform-domain LMS update (element-wise division).
        self.w_dft = self.w_dft + self.step_size * np.conj(e_k) * (
            z_k / (self.gamma + self.power_vector)
        )

        # Recover the time-domain coefficients via the inverse unitary DFT.
        self.w = ifft(self.w_dft) * self._sqrtN

        self._record_history()
        w_hist_dft.append(self.w_dft.copy())

    runtime_s = float(perf_counter() - t0)
    if verbose:
        print(f"[TDomainDFT] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "coefficients_dft": np.asarray(w_hist_dft),
            "power_vector_last": self.power_vector.copy(),
            "sqrtN": self._sqrtN,
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the Transform-Domain LMS (DFT) adaptation loop.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes transform-domain internal states in result.extra:
"coefficients_dft", "power_vector_last", and "sqrtN".
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar a priori output sequence, y[k] = w_z^H[k] z_k.
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Time-domain coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True with:
- coefficients_dft : ndarray of complex
Transform-domain coefficient history.
- power_vector_last : ndarray of float
Final per-bin power estimate p[k].
- sqrtN : float
The unitary normalization factor \sqrt{N}.
class RLS(AdaptiveFilter):
    """
    Recursive Least Squares (RLS) adaptive filter (complex-valued).

    Exponentially-weighted least-squares adaptive FIR filter following
    Diniz (Alg. 5.3). The algorithm updates the coefficient vector using a
    Kalman-gain-like direction and updates an inverse correlation matrix via
    the matrix inversion lemma.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
    delta : float
        Positive initialization factor for the inverse correlation matrix:
        ``S_d(0) = (1/delta) I``.
    forgetting_factor : float
        Forgetting factor ``lambda`` with ``0 < lambda <= 1``.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.
    safe_eps : float, optional
        Small positive constant used to guard denominators. Default is 1e-12.

    Notes
    -----
    At iteration ``k``, form the regressor vector (tapped delay line):

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1}.

    The a priori output and error are:

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].

    Let ``S_d[k-1]`` denote the inverse correlation estimate and define the
    intermediate vector:

    .. math::
        \\psi[k] = S_d[k-1] x_k.

    The gain denominator and gain vector are:

    .. math::
        \\Delta[k] = \\lambda + x_k^H \\psi[k], \\qquad
        g[k] = \\frac{\\psi[k]}{\\Delta[k]}.

    The coefficient update is:

    .. math::
        w[k+1] = w[k] + e^*[k] \\, g[k],

    and the inverse correlation update is:

    .. math::
        S_d[k] = \\frac{1}{\\lambda}\\Bigl(S_d[k-1] - g[k] \\psi^H[k]\\Bigr).

    If ``return_internal_states=True``, the a posteriori output/error computed
    with the updated weights are also returned:

    .. math::
        y^{post}[k] = w^H[k+1] x_k, \\qquad e^{post}[k] = d[k] - y^{post}[k].

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 5.3.
    """

    supports_complex: bool = True

    forgetting_factor: float
    delta: float
    S_d: np.ndarray

    def __init__(
        self,
        filter_order: int,
        delta: float,
        forgetting_factor: float,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.forgetting_factor = float(forgetting_factor)
        if not (0.0 < self.forgetting_factor <= 1.0):
            raise ValueError(f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got forgetting_factor={self.forgetting_factor}.")

        self.delta = float(delta)
        if self.delta <= 0.0:
            raise ValueError(f"delta must be positive. Got delta={self.delta}.")

        self._safe_eps = float(safe_eps)

        # S_d(0) = (1/delta) I, the initial inverse correlation estimate.
        n_taps = int(self.filter_order) + 1
        self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the RLS adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes a posteriori sequences and final internal states
            in ``result.extra``.

        Returns
        -------
        OptimizationResult
            Result object whose ``outputs``/``errors`` hold the a priori
            sequences ``y[k] = w^H[k] x_k`` and ``e[k] = d[k] - y[k]``, whose
            ``coefficients`` hold the history recorded by the base class, with
            ``error_type="a_priori"`` and, when ``return_internal_states=True``,
            an ``extra`` dict with ``outputs_posteriori``,
            ``errors_posteriori``, ``S_d_last`` and ``gain_last``.
        """
        # Monotonic, high-resolution timer for interval measurement
        # (consistent with TDomainDFT); time.time() is wall-clock and can
        # jump backwards, e.g. on NTP adjustments.
        from time import perf_counter

        tic: float = perf_counter()

        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples: int = int(d.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
        errors: np.ndarray = np.zeros(n_samples, dtype=complex)

        y_post: Optional[np.ndarray] = None
        e_post: Optional[np.ndarray] = None
        if return_internal_states:
            y_post = np.zeros(n_samples, dtype=complex)
            e_post = np.zeros(n_samples, dtype=complex)

        last_gain: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Tapped delay line: shift and insert the newest sample first.
            self.regressor = np.roll(self.regressor, 1)
            self.regressor[0] = x[k]

            # A priori output/error: y = w^H x, e = d - y.
            y_k: complex = complex(np.vdot(self.w, self.regressor))
            e_k: complex = d[k] - y_k

            outputs[k] = y_k
            errors[k] = e_k

            # psi = S_d x and gain denominator Delta = lambda + x^H psi.
            Sx: np.ndarray = self.S_d @ self.regressor
            den: complex = self.forgetting_factor + complex(np.vdot(self.regressor, Sx))
            if abs(den) < self._safe_eps:
                # Guard against a numerically vanishing denominator.
                den = den + (self._safe_eps + 0.0j)

            g: np.ndarray = Sx / den
            last_gain = g

            # w(k+1) = w(k) + e*(k) g(k).
            self.w = self.w + np.conj(e_k) * g

            # Matrix inversion lemma update of the inverse correlation matrix.
            self.S_d = (self.S_d - np.outer(g, np.conj(Sx))) / self.forgetting_factor

            if return_internal_states:
                # A posteriori quantities evaluated with the updated weights.
                yk_post = complex(np.vdot(self.w, self.regressor))
                y_post[k] = yk_post
                e_post[k] = d[k] - yk_post

            self._record_history()

        runtime_s: float = float(perf_counter() - tic)
        if verbose:
            print(f"[RLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "outputs_posteriori": y_post,
                "errors_posteriori": e_post,
                "S_d_last": self.S_d.copy(),
                "gain_last": None if last_gain is None else last_gain.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Recursive Least Squares (RLS) adaptive filter (complex-valued).
Exponentially-weighted least-squares adaptive FIR filter following Diniz (Alg. 5.3). The algorithm updates the coefficient vector using a Kalman-gain-like direction and updates an inverse correlation matrix via the matrix inversion lemma.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
delta : float
Positive initialization factor for the inverse correlation matrix:
S_d(0) = (1/delta) I.
forgetting_factor : float
Forgetting factor lambda with 0 < lambda <= 1.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
safe_eps : float, optional
Small positive constant used to guard denominators. Default is 1e-12.
Notes
At iteration k, form the regressor vector (tapped delay line):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
The a priori output and error are:
$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$
Let $S_d[k-1] \in \mathbb{C}^{(M+1)\times(M+1)}$ denote the inverse correlation
estimate. Define the intermediate vector:
$$\psi[k] = S_d[k-1] x_k.$$
The gain denominator and gain vector are:
$$\Delta[k] = \lambda + x_k^H \psi[k] = \lambda + x_k^H S_d[k-1] x_k,$$
$$g[k] = \frac{\psi[k]}{\Delta[k]}.$$
The coefficient update is:
$$w[k+1] = w[k] + e^*[k] \, g[k],$$
and the inverse correlation update is:
$$S_d[k] = \frac{1}{\lambda}\Bigl(S_d[k-1] - g[k] \psi^H[k]\Bigr).$$
A posteriori quantities
If return_internal_states=True, this implementation also computes the
a posteriori output/error using the updated weights:
$$y^{post}[k] = w^H[k+1] x_k, \qquad e^{post}[k] = d[k] - y^{post}[k].$$
References
105 def __init__( 106 self, 107 filter_order: int, 108 delta: float, 109 forgetting_factor: float, 110 w_init: Optional[ArrayLike] = None, 111 *, 112 safe_eps: float = 1e-12, 113 ) -> None: 114 super().__init__(filter_order=int(filter_order), w_init=w_init) 115 116 self.forgetting_factor = float(forgetting_factor) 117 if not (0.0 < self.forgetting_factor <= 1.0): 118 raise ValueError(f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got forgetting_factor={self.forgetting_factor}.") 119 120 self.delta = float(delta) 121 if self.delta <= 0.0: 122 raise ValueError(f"delta must be positive. Got delta={self.delta}.") 123 124 self._safe_eps = float(safe_eps) 125 126 n_taps = int(self.filter_order) + 1 127 self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)
129 @validate_input 130 def optimize( 131 self, 132 input_signal: np.ndarray, 133 desired_signal: np.ndarray, 134 verbose: bool = False, 135 return_internal_states: bool = False, 136 ) -> OptimizationResult: 137 """ 138 Executes the RLS adaptation loop over paired input/desired sequences. 139 140 Parameters 141 ---------- 142 input_signal : array_like of complex 143 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 144 desired_signal : array_like of complex 145 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 146 verbose : bool, optional 147 If True, prints the total runtime after completion. 148 return_internal_states : bool, optional 149 If True, includes a posteriori sequences and final internal states in 150 ``result.extra`` (see below). 151 152 Returns 153 ------- 154 OptimizationResult 155 Result object with fields: 156 - outputs : ndarray of complex, shape ``(N,)`` 157 A priori output sequence, ``y[k] = w^H[k] x_k``. 158 - errors : ndarray of complex, shape ``(N,)`` 159 A priori error sequence, ``e[k] = d[k] - y[k]``. 160 - coefficients : ndarray of complex 161 Coefficient history recorded by the base class. 162 - error_type : str 163 Set to ``"a_priori"``. 164 - extra : dict, optional 165 Present only if ``return_internal_states=True`` with: 166 - ``outputs_posteriori`` : ndarray of complex 167 A posteriori output sequence, ``y^{post}[k] = w^H[k+1] x_k``. 168 - ``errors_posteriori`` : ndarray of complex 169 A posteriori error sequence, ``e^{post}[k] = d[k] - y^{post}[k]``. 170 - ``S_d_last`` : ndarray of complex 171 Final inverse correlation matrix ``S_d``. 172 - ``gain_last`` : ndarray of complex 173 Last gain vector ``g[k]``. 
174 """ 175 tic: float = time() 176 177 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 178 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 179 180 n_samples: int = int(d.size) 181 182 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 183 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 184 185 y_post: Optional[np.ndarray] = None 186 e_post: Optional[np.ndarray] = None 187 if return_internal_states: 188 y_post = np.zeros(n_samples, dtype=complex) 189 e_post = np.zeros(n_samples, dtype=complex) 190 191 last_gain: Optional[np.ndarray] = None 192 193 for k in range(n_samples): 194 self.regressor = np.roll(self.regressor, 1) 195 self.regressor[0] = x[k] 196 197 y_k: complex = complex(np.vdot(self.w, self.regressor)) 198 e_k: complex = d[k] - y_k 199 200 outputs[k] = y_k 201 errors[k] = e_k 202 203 Sx: np.ndarray = self.S_d @ self.regressor 204 den: complex = self.forgetting_factor + complex(np.vdot(self.regressor, Sx)) 205 if abs(den) < self._safe_eps: 206 den = den + (self._safe_eps + 0.0j) 207 208 g: np.ndarray = Sx / den 209 last_gain = g 210 211 self.w = self.w + np.conj(e_k) * g 212 213 self.S_d = (self.S_d - np.outer(g, np.conj(Sx))) / self.forgetting_factor 214 215 if return_internal_states: 216 yk_post = complex(np.vdot(self.w, self.regressor)) 217 y_post[k] = yk_post 218 e_post[k] = d[k] - yk_post 219 220 self._record_history() 221 222 runtime_s: float = float(time() - tic) 223 if verbose: 224 print(f"[RLS] Completed in {runtime_s * 1000:.03f} ms") 225 226 extra: Optional[Dict[str, Any]] = None 227 if return_internal_states: 228 extra = { 229 "outputs_posteriori": y_post, 230 "errors_posteriori": e_post, 231 "S_d_last": self.S_d.copy(), 232 "gain_last": None if last_gain is None else last_gain.copy(), 233 } 234 235 return self._pack_results( 236 outputs=outputs, 237 errors=errors, 238 runtime_s=runtime_s, 239 error_type="a_priori", 240 extra=extra, 241 )
Executes the RLS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes a posteriori sequences and final internal states in
result.extra (see below).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
A priori output sequence, y[k] = w^H[k] x_k.
- errors : ndarray of complex, shape (N,)
A priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True with:
- outputs_posteriori : ndarray of complex
A posteriori output sequence, y^{post}[k] = w^H[k+1] x_k.
- errors_posteriori : ndarray of complex
A posteriori error sequence, e^{post}[k] = d[k] - y^{post}[k].
- S_d_last : ndarray of complex
Final inverse correlation matrix S_d.
- gain_last : ndarray of complex
Last gain vector g[k].
class RLSAlt(AdaptiveFilter):
    """
    Alternative RLS (RLS-Alt) adaptive filter (complex-valued).

    Alternative RLS algorithm based on Diniz (Alg. 5.4), designed to reduce
    the computational burden of the standard RLS recursion by introducing an
    auxiliary vector ``psi[k]``. The method maintains an estimate of the
    inverse input correlation matrix and updates the coefficients using a
    Kalman-gain-like vector.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``. The number of coefficients is ``M + 1``.
    delta : float
        Positive initialization factor for the inverse correlation matrix:
        ``S_d(0) = (1/delta) I``.
    forgetting_factor : float
        Forgetting factor ``lambda`` with ``0 < lambda <= 1``.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.
    safe_eps : float, optional
        Small positive constant used to guard denominators. Default is 1e-12.

    Notes
    -----
    At iteration ``k``, form the regressor vector (tapped delay line):

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T \\in \\mathbb{C}^{M+1}.

    The a priori output and error are:

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].

    The key auxiliary vector is:

    .. math::
        \\psi[k] = S_d[k-1] x_k,

    where ``S_d[k-1]`` is the inverse correlation estimate. The gain
    denominator and gain vector are:

    .. math::
        \\Delta[k] = \\lambda + x_k^H \\psi[k], \\qquad
        g[k] = \\frac{\\psi[k]}{\\Delta[k]}.

    The coefficient update is:

    .. math::
        w[k+1] = w[k] + e^*[k] \\, g[k],

    and the inverse correlation update is:

    .. math::
        S_d[k] = \\frac{1}{\\lambda}\\Bigl(S_d[k-1] - g[k] \\psi^H[k]\\Bigr).

    If requested, the a posteriori output/error computed with the updated
    weights are also returned:

    .. math::
        y^{post}[k] = w^H[k+1] x_k, \\qquad e^{post}[k] = d[k] - y^{post}[k].

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 5.4.
    """

    supports_complex: bool = True

    forgetting_factor: float
    delta: float
    S_d: np.ndarray

    def __init__(
        self,
        filter_order: int,
        delta: float,
        forgetting_factor: float,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.forgetting_factor = float(forgetting_factor)
        if not (0.0 < self.forgetting_factor <= 1.0):
            raise ValueError(f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got forgetting_factor={self.forgetting_factor}.")

        self.delta = float(delta)
        if self.delta <= 0.0:
            raise ValueError(f"delta must be positive. Got delta={self.delta}.")

        self._safe_eps = float(safe_eps)

        # S_d(0) = (1/delta) I, the initial inverse correlation estimate.
        n_taps = int(self.filter_order) + 1
        self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the RLS-Alt adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes a posteriori sequences and the last internal
            states in ``result.extra``.

        Returns
        -------
        OptimizationResult
            Result object whose ``outputs``/``errors`` hold the a priori
            sequences ``y[k] = w^H[k] x_k`` and ``e[k] = d[k] - y[k]``, whose
            ``coefficients`` hold the history recorded by the base class, with
            ``error_type="a_priori"`` and, when ``return_internal_states=True``,
            an ``extra`` dict with ``outputs_posteriori``,
            ``errors_posteriori``, ``S_d_last`` and ``gain_last``.
        """
        # Monotonic, high-resolution timer for interval measurement
        # (consistent with TDomainDFT); time.time() is wall-clock and can
        # jump backwards, e.g. on NTP adjustments.
        from time import perf_counter

        tic: float = perf_counter()

        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples: int = int(d.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
        errors: np.ndarray = np.zeros(n_samples, dtype=complex)

        y_post: Optional[np.ndarray] = None
        e_post: Optional[np.ndarray] = None
        if return_internal_states:
            y_post = np.zeros(n_samples, dtype=complex)
            e_post = np.zeros(n_samples, dtype=complex)

        last_gain: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Tapped delay line: shift and insert the newest sample first.
            self.regressor = np.roll(self.regressor, 1)
            self.regressor[0] = x[k]

            # A priori output/error: y = w^H x, e = d - y.
            y_k = complex(np.vdot(self.w, self.regressor))
            e_k = d[k] - y_k

            outputs[k] = y_k
            errors[k] = e_k

            # Auxiliary vector psi = S_d x (the RLS-Alt key quantity).
            psi: np.ndarray = self.S_d @ self.regressor

            # Gain denominator Delta = lambda + x^H psi, guarded against
            # numerical underflow.
            den: complex = self.forgetting_factor + complex(np.vdot(self.regressor, psi))
            if abs(den) < self._safe_eps:
                den = den + (self._safe_eps + 0.0j)

            g: np.ndarray = psi / den
            last_gain = g

            # w(k+1) = w(k) + e*(k) g(k).
            self.w = self.w + np.conj(e_k) * g

            # Matrix inversion lemma update of the inverse correlation matrix.
            self.S_d = (self.S_d - np.outer(g, np.conj(psi))) / self.forgetting_factor

            if return_internal_states:
                # A posteriori quantities evaluated with the updated weights.
                yk_post = complex(np.vdot(self.w, self.regressor))
                y_post[k] = yk_post
                e_post[k] = d[k] - yk_post

            self._record_history()

        runtime_s: float = float(perf_counter() - tic)
        if verbose:
            print(f"[RLSAlt] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "outputs_posteriori": y_post,
                "errors_posteriori": e_post,
                "S_d_last": self.S_d.copy(),
                "gain_last": None if last_gain is None else last_gain.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Alternative RLS (RLS-Alt) adaptive filter (complex-valued).
Alternative RLS algorithm based on Diniz (Alg. 5.4), designed to reduce
the computational burden of the standard RLS recursion by introducing an
auxiliary vector psi[k]. The method maintains an estimate of the inverse
input correlation matrix and updates the coefficients using a Kalman-gain-like
vector.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
delta : float
Positive initialization factor for the inverse correlation matrix:
S_d(0) = (1/delta) I.
forgetting_factor : float
Forgetting factor lambda with 0 < lambda <= 1.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
safe_eps : float, optional
Small positive constant used to guard denominators. Default is 1e-12.
Notes
At iteration k, form the regressor vector (tapped delay line):
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
The a priori output and error are:
$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$
The key auxiliary vector is:
$$\psi[k] = S_d[k-1] x_k,$$
where S_d[k-1] is the inverse correlation estimate.
Define the gain denominator:
$$\Delta[k] = \lambda + x_k^H \psi[k] = \lambda + x_k^H S_d[k-1] x_k,$$
and the gain vector:
$$g[k] = \frac{\psi[k]}{\Delta[k]}.$$
The coefficient update is:
$$w[k+1] = w[k] + e^*[k] \, g[k],$$
and the inverse correlation update is:
$$S_d[k] = \frac{1}{\lambda}\Bigl(S_d[k-1] - g[k] \psi^H[k]\Bigr).$$
A posteriori quantities: if requested, this implementation also computes the a posteriori output/error using the updated weights:
$$y^{post}[k] = w^H[k+1] x_k, \qquad e^{post}[k] = d[k] - y^{post}[k].$$
References
112 def __init__( 113 self, 114 filter_order: int, 115 delta: float, 116 forgetting_factor: float, 117 w_init: Optional[ArrayLike] = None, 118 *, 119 safe_eps: float = 1e-12, 120 ) -> None: 121 super().__init__(filter_order=int(filter_order), w_init=w_init) 122 123 self.forgetting_factor = float(forgetting_factor) 124 if not (0.0 < self.forgetting_factor <= 1.0): 125 raise ValueError(f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got forgetting_factor={self.forgetting_factor}.") 126 127 self.delta = float(delta) 128 if self.delta <= 0.0: 129 raise ValueError(f"delta must be positive. Got delta={self.delta}.") 130 131 self._safe_eps = float(safe_eps) 132 133 n_taps = int(self.filter_order) + 1 134 self.S_d = (1.0 / self.delta) * np.eye(n_taps, dtype=complex)
136 @validate_input 137 def optimize( 138 self, 139 input_signal: np.ndarray, 140 desired_signal: np.ndarray, 141 verbose: bool = False, 142 return_internal_states: bool = False, 143 ) -> OptimizationResult: 144 """ 145 Executes the RLS-Alt adaptation loop over paired input/desired sequences. 146 147 Parameters 148 ---------- 149 input_signal : array_like of complex 150 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 151 desired_signal : array_like of complex 152 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 153 verbose : bool, optional 154 If True, prints the total runtime after completion. 155 return_internal_states : bool, optional 156 If True, includes a posteriori sequences and the last internal states 157 in ``result.extra`` (see below). 158 159 Returns 160 ------- 161 OptimizationResult 162 Result object with fields: 163 - outputs : ndarray of complex, shape ``(N,)`` 164 A priori output sequence, ``y[k] = w^H[k] x_k``. 165 - errors : ndarray of complex, shape ``(N,)`` 166 A priori error sequence, ``e[k] = d[k] - y[k]``. 167 - coefficients : ndarray of complex 168 Coefficient history recorded by the base class. 169 - error_type : str 170 Set to ``"a_priori"``. 171 - extra : dict, optional 172 Present only if ``return_internal_states=True`` with: 173 - ``outputs_posteriori`` : ndarray of complex 174 A posteriori output sequence, ``y^{post}[k] = w^H[k+1] x_k``. 175 - ``errors_posteriori`` : ndarray of complex 176 A posteriori error sequence, ``e^{post}[k] = d[k] - y^{post}[k]``. 177 - ``S_d_last`` : ndarray of complex 178 Final inverse correlation matrix ``S_d``. 179 - ``gain_last`` : ndarray of complex 180 Last gain vector ``g[k]``. 
181 """ 182 tic: float = time() 183 184 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 185 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 186 187 n_samples: int = int(d.size) 188 189 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 190 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 191 192 y_post: Optional[np.ndarray] = None 193 e_post: Optional[np.ndarray] = None 194 if return_internal_states: 195 y_post = np.zeros(n_samples, dtype=complex) 196 e_post = np.zeros(n_samples, dtype=complex) 197 198 last_gain: Optional[np.ndarray] = None 199 200 for k in range(n_samples): 201 self.regressor = np.roll(self.regressor, 1) 202 self.regressor[0] = x[k] 203 204 y_k = complex(np.vdot(self.w, self.regressor)) 205 e_k = d[k] - y_k 206 207 outputs[k] = y_k 208 errors[k] = e_k 209 210 psi: np.ndarray = self.S_d @ self.regressor 211 212 den: complex = self.forgetting_factor + complex(np.vdot(self.regressor, psi)) 213 if abs(den) < self._safe_eps: 214 den = den + (self._safe_eps + 0.0j) 215 216 g: np.ndarray = psi / den 217 last_gain = g 218 219 self.w = self.w + np.conj(e_k) * g 220 221 self.S_d = (self.S_d - np.outer(g, np.conj(psi))) / self.forgetting_factor 222 223 if return_internal_states: 224 yk_post = complex(np.vdot(self.w, self.regressor)) 225 y_post[k] = yk_post 226 e_post[k] = d[k] - yk_post 227 228 self._record_history() 229 230 runtime_s: float = float(time() - tic) 231 if verbose: 232 print(f"[RLSAlt] Completed in {runtime_s * 1000:.03f} ms") 233 234 extra: Optional[Dict[str, Any]] = None 235 if return_internal_states: 236 extra = { 237 "outputs_posteriori": y_post, 238 "errors_posteriori": e_post, 239 "S_d_last": self.S_d.copy(), 240 "gain_last": None if last_gain is None else last_gain.copy(), 241 } 242 243 return self._pack_results( 244 outputs=outputs, 245 errors=errors, 246 runtime_s=runtime_s, 247 error_type="a_priori", 248 extra=extra, 249 )
Executes the RLS-Alt adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes a posteriori sequences and the last internal states
in result.extra (see below).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
A priori output sequence, y[k] = w^H[k] x_k.
- errors : ndarray of complex, shape (N,)
A priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True with:
- outputs_posteriori : ndarray of complex
A posteriori output sequence, y^{post}[k] = w^H[k+1] x_k.
- errors_posteriori : ndarray of complex
A posteriori error sequence, e^{post}[k] = d[k] - y^{post}[k].
- S_d_last : ndarray of complex
Final inverse correlation matrix S_d.
- gain_last : ndarray of complex
Last gain vector g[k].
class SMNLMS(AdaptiveFilter):
    """
    Complex-valued Set-Membership Normalized LMS (SM-NLMS) filter.

    Follows Algorithm 6.1 of Diniz. A coefficient update is performed only
    when the a priori error magnitude exceeds the prescribed bound
    ``gamma_bar`` (set-membership criterion); when it is, an NLMS-like step
    is taken with a data-dependent step factor.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``; the filter has ``M + 1`` coefficients.
    gamma_bar : float
        Set-membership bound ``\\bar{\\gamma}`` on ``|e[k]|``. Updates occur
        only when ``|e[k]| > gamma_bar``.
    gamma : float
        Regularization constant added to ``||x_k||^2`` in the NLMS
        denominator for numerical stability.
    w_init : array_like of complex, optional
        Initial coefficients ``w(0)`` of shape ``(M + 1,)``; zeros if None.

    Notes
    -----
    With the tapped-delay regressor

    .. math::
        x_k = [x[k], x[k-1], \\dots, x[k-M]]^T \\in \\mathbb{C}^{M+1},

    the a priori output and error are

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].

    If ``|e[k]| \\le \\bar{\\gamma}`` no update happens; otherwise, with

    .. math::
        \\mu[k] = 1 - \\frac{\\bar{\\gamma}}{|e[k]|} \\in (0,1),
        \\qquad \\mathrm{den}[k] = \\gamma + \\|x_k\\|^2,

    the coefficients are updated as

    .. math::
        w[k+1] = w[k] + \\frac{\\mu[k]}{\\mathrm{den}[k]} \\, e^*[k] \\, x_k.

    Reported sequences are a priori, so ``error_type="a_priori"``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
           Implementation*, Algorithm 6.1.
    """

    supports_complex: bool = True

    gamma_bar: float
    gamma: float
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        gamma_bar: float,
        gamma: float,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order=filter_order, w_init=w_init)
        self.gamma_bar = float(gamma_bar)
        self.gamma = float(gamma)
        # Number of taps is order + 1.
        self.n_coeffs = int(self.filter_order + 1)
        # Counts iterations in which the coefficients actually changed.
        self.n_updates: int = 0

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run SM-NLMS adaptation over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input samples ``x[k]``; flattened to shape ``(N,)``.
        desired_signal : array_like of complex
            Desired samples ``d[k]``; flattened to shape ``(N,)``.
        verbose : bool, optional
            When True, prints update statistics and the runtime.
        return_internal_states : bool, optional
            When True, ``result.extra`` also carries the per-iteration
            ``mu`` and ``den`` trajectories (zero where no update occurred).

        Returns
        -------
        OptimizationResult
            A priori outputs/errors (``error_type="a_priori"``), coefficient
            history, and an ``extra`` dict that always contains
            ``n_updates`` (int) and ``update_mask`` (bool array of shape
            ``(N,)``).
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples: int = int(x.size)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)
        update_mask = np.zeros(n_samples, dtype=bool)

        track = return_internal_states
        mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if track else None
        den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if track else None

        self.n_updates = 0

        # Sanity-check the delay line (dtype and length) before iterating.
        self.regressor = np.asarray(self.regressor, dtype=complex)
        if self.regressor.size != self.n_coeffs:
            self.regressor = np.zeros(self.n_coeffs, dtype=complex)

        for k in range(n_samples):
            self.regressor = np.roll(self.regressor, 1)
            self.regressor[0] = x[k]

            yk: complex = complex(np.vdot(self.w, self.regressor))
            ek: complex = complex(d[k] - yk)

            outputs[k] = yk
            errors[k] = ek

            eabs = float(np.abs(ek))

            if eabs <= self.gamma_bar:
                # Inside the constraint set: skip the update entirely.
                if track and mu_track is not None:
                    mu_track[k] = 0.0
                self._record_history()
                continue

            self.n_updates += 1
            update_mask[k] = True

            # SM step factor mu[k] = 1 - gamma_bar / |e[k]|.
            mu = float(1.0 - (self.gamma_bar / eabs))

            norm_sq = float(np.real(np.vdot(self.regressor, self.regressor)))
            den = float(self.gamma + norm_sq)
            if den <= 0.0:
                # Floor the denominator so the division stays well defined.
                den = float(self.gamma + 1e-30)

            self.w = self.w + (mu / den) * (np.conj(ek) * self.regressor)

            if track:
                if mu_track is not None:
                    mu_track[k] = mu
                if den_track is not None:
                    den_track[k] = den

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            pct = (100.0 * self.n_updates / n_samples) if n_samples > 0 else 0.0
            print(f"[SM-NLMS] Updates: {self.n_updates}/{n_samples} ({pct:.1f}%) | Runtime: {runtime_s * 1000:.03f} ms")

        extra: Dict[str, Any] = {
            "n_updates": int(self.n_updates),
            "update_mask": update_mask,
        }
        if return_internal_states:
            extra["mu"] = mu_track
            extra["den"] = den_track

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Set-Membership Normalized LMS (SM-NLMS) adaptive filter (complex-valued).
Implements Algorithm 6.1 (Diniz). The coefficients are updated only when
the magnitude of the a priori error exceeds a prescribed bound gamma_bar
(set-membership criterion). When an update occurs, a normalized LMS-like
step is applied with an effective step factor that depends on |e[k]|.
Parameters
filter_order : int
Adaptive FIR filter order M (number of coefficients is M + 1).
gamma_bar : float
Set-membership bound \bar{\gamma} for the a priori error magnitude.
An update occurs only if |e[k]| > gamma_bar.
gamma : float
Regularization constant used in the NLMS denominator
gamma + ||x_k||^2 to improve numerical stability.
w_init : array_like of complex, optional
Initial coefficient vector w(0), shape (M + 1,). If None, zeros.
Notes
Let the tapped-delay regressor be
$$x_k = [x[k], x[k-1], \dots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
The a priori output and error are
$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$
Set-membership condition
If $|e[k]| \le \bar{\gamma}$, no update is performed.
If ``|e[k]| > \bar{\gamma}``, define the SM step factor
$$\mu[k] = 1 - \frac{\bar{\gamma}}{|e[k]|} \in (0,1).$$
Normalized update (as implemented)
With \mathrm{den}[k] = \gamma + \|x_k\|^2, the coefficient update is
$$w[k+1] = w[k] + \frac{\mu[k]}{\mathrm{den}[k]} \, e^*[k] \, x_k.$$
Returned error type
This implementation reports the a priori sequences (computed before
updating w), so error_type="a_priori".
References
89 def __init__( 90 self, 91 filter_order: int, 92 gamma_bar: float, 93 gamma: float, 94 w_init: Optional[Union[np.ndarray, list]] = None, 95 ) -> None: 96 super().__init__(filter_order=filter_order, w_init=w_init) 97 self.gamma_bar = float(gamma_bar) 98 self.gamma = float(gamma) 99 self.n_coeffs = int(self.filter_order + 1) 100 101 self.n_updates: int = 0
103 @validate_input 104 def optimize( 105 self, 106 input_signal: np.ndarray, 107 desired_signal: np.ndarray, 108 verbose: bool = False, 109 return_internal_states: bool = False, 110 ) -> OptimizationResult: 111 """ 112 Executes the SM-NLMS adaptation over paired sequences ``x[k]`` and ``d[k]``. 113 114 Parameters 115 ---------- 116 input_signal : array_like of complex 117 Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally). 118 desired_signal : array_like of complex 119 Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally). 120 verbose : bool, optional 121 If True, prints runtime and update statistics after completion. 122 return_internal_states : bool, optional 123 If True, includes internal trajectories in ``result.extra``: 124 ``mu`` and ``den`` (each length ``N``). Entries are zero when no update occurs. 125 126 Returns 127 ------- 128 OptimizationResult 129 Result object with fields: 130 - outputs : ndarray of complex, shape ``(N,)`` 131 A priori output sequence ``y[k] = w^H[k] x_k``. 132 - errors : ndarray of complex, shape ``(N,)`` 133 A priori error sequence ``e[k] = d[k] - y[k]``. 134 - coefficients : ndarray of complex 135 Coefficient history recorded by the base class. 136 - error_type : str 137 Set to ``"a_priori"``. 138 - extra : dict 139 Always present with: 140 - ``"n_updates"`` : int 141 Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``). 142 - ``"update_mask"`` : ndarray of bool, shape ``(N,)`` 143 Boolean mask indicating which iterations performed updates. 144 Additionally present only if ``return_internal_states=True``: 145 - ``"mu"`` : ndarray of float, shape ``(N,)`` 146 Step factor ``mu[k]`` (0 when no update). 147 - ``"den"`` : ndarray of float, shape ``(N,)`` 148 Denominator ``gamma + ||x_k||^2`` (0 when no update). 
149 """ 150 tic: float = time() 151 152 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 153 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 154 155 n_samples: int = int(x.size) 156 157 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 158 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 159 160 update_mask: np.ndarray = np.zeros(n_samples, dtype=bool) 161 162 mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None 163 den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None 164 165 self.n_updates = 0 166 167 self.regressor = np.asarray(self.regressor, dtype=complex) 168 if self.regressor.size != self.n_coeffs: 169 self.regressor = np.zeros(self.n_coeffs, dtype=complex) 170 171 for k in range(n_samples): 172 self.regressor = np.roll(self.regressor, 1) 173 self.regressor[0] = x[k] 174 175 yk: complex = complex(np.dot(self.w.conj(), self.regressor)) 176 ek: complex = complex(d[k] - yk) 177 178 outputs[k] = yk 179 errors[k] = ek 180 181 eabs: float = float(np.abs(ek)) 182 183 if eabs > self.gamma_bar: 184 self.n_updates += 1 185 update_mask[k] = True 186 187 mu: float = float(1.0 - (self.gamma_bar / eabs)) 188 189 norm_sq: float = float(np.real(np.dot(self.regressor.conj(), self.regressor))) 190 den: float = float(self.gamma + norm_sq) 191 192 if den <= 0.0: 193 den = float(self.gamma + 1e-30) 194 195 self.w = self.w + (mu / den) * (np.conj(ek) * self.regressor) 196 197 if return_internal_states: 198 if mu_track is not None: 199 mu_track[k] = mu 200 if den_track is not None: 201 den_track[k] = den 202 else: 203 if return_internal_states and mu_track is not None: 204 mu_track[k] = 0.0 205 206 self._record_history() 207 208 runtime_s: float = float(time() - tic) 209 if verbose: 210 pct = (100.0 * self.n_updates / n_samples) if n_samples > 0 else 0.0 211 print(f"[SM-NLMS] Updates: {self.n_updates}/{n_samples} ({pct:.1f}%) | Runtime: 
{runtime_s * 1000:.03f} ms") 212 213 extra: Dict[str, Any] = { 214 "n_updates": int(self.n_updates), 215 "update_mask": update_mask, 216 } 217 if return_internal_states: 218 extra.update( 219 { 220 "mu": mu_track, 221 "den": den_track, 222 } 223 ) 224 225 return self._pack_results( 226 outputs=outputs, 227 errors=errors, 228 runtime_s=runtime_s, 229 error_type="a_priori", 230 extra=extra, 231 )
Executes the SM-NLMS adaptation over paired sequences x[k] and d[k].
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (flattened internally).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (flattened internally).
verbose : bool, optional
If True, prints runtime and update statistics after completion.
return_internal_states : bool, optional
If True, includes internal trajectories in result.extra:
mu and den (each length N). Entries are zero when no update occurs.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
A priori output sequence y[k] = w^H[k] x_k.
- errors : ndarray of complex, shape (N,)
A priori error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict
Always present with:
- "n_updates" : int
Number of coefficient updates (iterations where |e[k]| > gamma_bar).
- "update_mask" : ndarray of bool, shape (N,)
Boolean mask indicating which iterations performed updates.
Additionally present only if return_internal_states=True:
- "mu" : ndarray of float, shape (N,)
Step factor mu[k] (0 when no update).
- "den" : ndarray of float, shape (N,)
Denominator gamma + ||x_k||^2 (0 when no update).
class SMBNLMS(AdaptiveFilter):
    """
    Complex-valued Set-Membership Binormalized LMS (SM-BNLMS) filter.

    Implements Algorithm 6.5 of Diniz. The method is a set-membership
    affine-projection special case with data-reuse order ``L = 1``: each
    update combines the current regressor and the previous one into a
    low-cost two-vector correction. Coefficients change only when the a
    priori error magnitude exceeds ``gamma_bar``.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``; the filter has ``M + 1`` coefficients.
    gamma_bar : float
        Set-membership bound on ``|e[k]|``; updates occur only when
        ``|e[k]| > gamma_bar``.
    gamma : float
        Regularization factor added to the binormalized denominator; should
        be nonnegative for numerical robustness.
    w_init : array_like of complex, optional
        Initial coefficients ``w(0)`` of shape ``(M + 1,)``; zeros if None.

    Notes
    -----
    With regressor ``x_k = [x[k], ..., x[k-M]]^T`` and previous regressor
    ``x_{k-1}``, the a priori output and error are

    .. math::
        y[k] = w^H[k] x_k, \\qquad e[k] = d[k] - y[k].

    No update occurs when ``|e[k]| \\le \\bar{\\gamma}``. Otherwise, with

    .. math::
        \\mu[k] = 1 - \\frac{\\bar{\\gamma}}{|e[k]|}, \\quad
        a = \\|x_k\\|^2, \\quad b = \\|x_{k-1}\\|^2, \\quad
        c = x_{k-1}^H x_k,

    and ``den[k] = gamma + a b - |c|^2`` (floored to a small positive value
    by the code), the update computes

    .. math::
        \\lambda_1[k] = \\frac{\\mu[k]\\, e[k]\\, b}{\\mathrm{den}[k]}, \\qquad
        \\lambda_2[k] = -\\frac{\\mu[k]\\, e[k]\\, c^*}{\\mathrm{den}[k]},

    .. math::
        w[k+1] = w[k] + \\lambda_1^*[k] x_k + \\lambda_2^*[k] x_{k-1}.

    Reported sequences are a priori, so ``error_type="a_priori"``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
           Implementation*, Algorithm 6.5.
    """

    supports_complex: bool = True

    gamma_bar: float
    gamma: float
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        gamma_bar: float,
        gamma: float,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.gamma_bar = float(gamma_bar)
        self.gamma = float(gamma)
        self.n_coeffs = int(self.filter_order + 1)

        # Previous regressor x_{k-1}, required by the binormalized update.
        self.regressor_prev: np.ndarray = np.zeros(self.n_coeffs, dtype=complex)

        # Counts iterations in which the coefficients actually changed.
        self.n_updates: int = 0

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run SM-BNLMS adaptation over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input samples ``x[k]``; flattened to shape ``(N,)``.
        desired_signal : array_like of complex
            Desired samples ``d[k]``; flattened to shape ``(N,)``.
        verbose : bool, optional
            When True, prints the update count and runtime at the end.
        return_internal_states : bool, optional
            When True, ``result.extra`` also carries the per-iteration
            ``mu``, ``den``, ``lambda1`` and ``lambda2`` trajectories
            (zero at non-update iterations).

        Returns
        -------
        OptimizationResult
            A priori outputs/errors (``error_type="a_priori"``), coefficient
            history, and an ``extra`` dict that always contains
            ``n_updates`` (int) and ``update_mask`` (bool array of shape
            ``(N,)``).
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples: int = int(x.size)
        n_coeffs: int = int(self.n_coeffs)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)
        update_mask = np.zeros(n_samples, dtype=bool)

        track = return_internal_states
        mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if track else None
        den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if track else None
        lam1_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if track else None
        lam2_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if track else None

        self.n_updates = 0

        # Sanity-check both delay lines (dtype and length) before iterating.
        self.regressor = np.asarray(self.regressor, dtype=complex)
        if self.regressor.size != n_coeffs:
            self.regressor = np.zeros(n_coeffs, dtype=complex)

        self.regressor_prev = np.asarray(self.regressor_prev, dtype=complex)
        if self.regressor_prev.size != n_coeffs:
            self.regressor_prev = np.zeros(n_coeffs, dtype=complex)

        for k in range(n_samples):
            # Remember x_{k-1}, then shift in the newest sample.
            self.regressor_prev = self.regressor.copy()
            self.regressor = np.roll(self.regressor, 1)
            self.regressor[0] = x[k]

            yk: complex = complex(np.vdot(self.w, self.regressor))
            ek: complex = complex(d[k] - yk)

            outputs[k] = yk
            errors[k] = ek

            eabs = float(np.abs(ek))

            if eabs <= self.gamma_bar:
                # Inside the constraint set: skip the update entirely.
                if track and mu_track is not None:
                    mu_track[k] = 0.0
                self._record_history()
                continue

            self.n_updates += 1
            update_mask[k] = True

            # SM step factor mu[k] = 1 - gamma_bar / |e[k]|.
            mu = float(1.0 - (self.gamma_bar / eabs))

            # a = ||x_k||^2, b = ||x_{k-1}||^2, c = x_{k-1}^H x_k.
            norm_sq = float(np.real(np.vdot(self.regressor, self.regressor)))
            prev_norm_sq = float(np.real(np.vdot(self.regressor_prev, self.regressor_prev)))
            cross_term = complex(np.vdot(self.regressor_prev, self.regressor))

            den = float(self.gamma + (norm_sq * prev_norm_sq) - (np.abs(cross_term) ** 2))
            if den <= 0.0:
                # Floor the denominator so the division stays well defined.
                den = float(self.gamma + 1e-30)

            lambda1: complex = complex((mu * ek * prev_norm_sq) / den)
            lambda2: complex = complex(-(mu * ek * np.conj(cross_term)) / den)

            self.w = self.w + (np.conj(lambda1) * self.regressor) + (np.conj(lambda2) * self.regressor_prev)

            if track:
                if mu_track is not None:
                    mu_track[k] = mu
                if den_track is not None:
                    den_track[k] = den
                if lam1_track is not None:
                    lam1_track[k] = lambda1
                if lam2_track is not None:
                    lam2_track[k] = lambda2

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[SM-BNLMS] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.03f} ms")

        extra: Dict[str, Any] = {
            "n_updates": int(self.n_updates),
            "update_mask": update_mask,
        }
        if return_internal_states:
            extra["mu"] = mu_track
            extra["den"] = den_track
            extra["lambda1"] = lam1_track
            extra["lambda2"] = lam2_track

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Set-Membership Binormalized LMS (SM-BNLMS) adaptive filter (complex-valued).
Implements Algorithm 6.5 (Diniz). This method can be viewed as a particular
set-membership affine-projection (SM-AP) case with projection order L = 1,
i.e., it reuses the current and previous regressors to build a low-cost
two-vector update.
The filter updates only when the magnitude of the a priori error exceeds
a prescribed bound gamma_bar (set-membership criterion).
Parameters
filter_order : int
Adaptive FIR filter order M (number of coefficients is M + 1).
gamma_bar : float
Set-membership bound \bar{\gamma} for the a priori error magnitude.
An update occurs only if |e[k]| > gamma_bar.
gamma : float
Regularization factor used in the binormalized denominator. It must be
positive (or at least nonnegative) to improve numerical robustness.
w_init : array_like of complex, optional
Initial coefficient vector w(0), shape (M + 1,). If None, zeros.
Notes
Let the tapped-delay regressor be
$$x_k = [x[k], x[k-1], \dots, x[k-M]]^T \in \mathbb{C}^{M+1}$$
and the previous regressor be x_{k-1} (as stored by the implementation).
The a priori output and error are
$$y[k] = w^H[k] x_k, \qquad e[k] = d[k] - y[k].$$
Set-membership condition
If $|e[k]| \le \bar{\gamma}$, no update is performed.
If ``|e[k]| > \bar{\gamma}``, define the SM step factor
$$\mu[k] = 1 - \frac{\bar{\gamma}}{|e[k]|} \in (0,1).$$
Binormalized denominator: define
$$a = \|x_k\|^2, \quad b = \|x_{k-1}\|^2, \quad c = x_{k-1}^H x_k,$$
and
$$\mathrm{den}[k] = \gamma + a b - |c|^2.$$
(The code enforces a small positive floor if ``den`` becomes nonpositive.)
Update (as implemented)
The update uses two complex scalars \lambda_1 and \lambda_2:
$$\lambda_1[k] = \frac{\mu[k]\, e[k] \, \|x_{k-1}\|^2}{\mathrm{den}[k]}, \qquad
\lambda_2[k] = -\frac{\mu[k]\, e[k] \, c^*}{\mathrm{den}[k]}.$$
Then the coefficients are updated by
$$w[k+1] = w[k] + \lambda_1^*[k] x_k + \lambda_2^*[k] x_{k-1}.$$
Returned error type
This implementation reports the a priori sequences (computed before
updating w), so error_type="a_priori".
References
111 def __init__( 112 self, 113 filter_order: int, 114 gamma_bar: float, 115 gamma: float, 116 w_init: Optional[Union[np.ndarray, list]] = None, 117 ) -> None: 118 super().__init__(filter_order=filter_order, w_init=w_init) 119 120 self.gamma_bar = float(gamma_bar) 121 self.gamma = float(gamma) 122 self.n_coeffs = int(self.filter_order + 1) 123 124 self.regressor_prev: np.ndarray = np.zeros(self.n_coeffs, dtype=complex) 125 126 self.n_updates: int = 0
128 @validate_input 129 def optimize( 130 self, 131 input_signal: np.ndarray, 132 desired_signal: np.ndarray, 133 verbose: bool = False, 134 return_internal_states: bool = False, 135 ) -> OptimizationResult: 136 """ 137 Executes the SM-BNLMS adaptation over paired sequences ``x[k]`` and ``d[k]``. 138 139 Parameters 140 ---------- 141 input_signal : array_like of complex 142 Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally). 143 desired_signal : array_like of complex 144 Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally). 145 verbose : bool, optional 146 If True, prints runtime and update count after completion. 147 return_internal_states : bool, optional 148 If True, includes internal trajectories in ``result.extra``: 149 ``mu``, ``den``, ``lambda1``, ``lambda2`` (each length ``N``). Entries 150 are zero when no update occurs. 151 152 Returns 153 ------- 154 OptimizationResult 155 Result object with fields: 156 - outputs : ndarray of complex, shape ``(N,)`` 157 A priori output sequence ``y[k] = w^H[k] x_k``. 158 - errors : ndarray of complex, shape ``(N,)`` 159 A priori error sequence ``e[k] = d[k] - y[k]``. 160 - coefficients : ndarray of complex 161 Coefficient history recorded by the base class. 162 - error_type : str 163 Set to ``"a_priori"``. 164 - extra : dict 165 Always present with: 166 - ``"n_updates"`` : int 167 Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``). 168 - ``"update_mask"`` : ndarray of bool, shape ``(N,)`` 169 Boolean mask indicating which iterations performed updates. 170 Additionally present only if ``return_internal_states=True``: 171 - ``"mu"`` : ndarray of float, shape ``(N,)`` 172 Step factor ``mu[k]`` (0 when no update). 173 - ``"den"`` : ndarray of float, shape ``(N,)`` 174 Denominator used in ``lambda1/lambda2`` (0 when no update). 175 - ``"lambda1"`` : ndarray of complex, shape ``(N,)`` 176 ``lambda1[k]`` (0 when no update). 
177 - ``"lambda2"`` : ndarray of complex, shape ``(N,)`` 178 ``lambda2[k]`` (0 when no update). 179 """ 180 tic: float = time() 181 182 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 183 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 184 185 n_samples: int = int(x.size) 186 n_coeffs: int = int(self.n_coeffs) 187 188 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 189 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 190 191 update_mask: np.ndarray = np.zeros(n_samples, dtype=bool) 192 193 mu_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None 194 den_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None 195 lam1_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None 196 lam2_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None 197 198 self.n_updates = 0 199 200 self.regressor = np.asarray(self.regressor, dtype=complex) 201 if self.regressor.size != n_coeffs: 202 self.regressor = np.zeros(n_coeffs, dtype=complex) 203 204 self.regressor_prev = np.asarray(self.regressor_prev, dtype=complex) 205 if self.regressor_prev.size != n_coeffs: 206 self.regressor_prev = np.zeros(n_coeffs, dtype=complex) 207 208 for k in range(n_samples): 209 self.regressor_prev = self.regressor.copy() 210 211 self.regressor = np.roll(self.regressor, 1) 212 self.regressor[0] = x[k] 213 214 yk: complex = complex(np.dot(self.w.conj(), self.regressor)) 215 ek: complex = complex(d[k] - yk) 216 217 outputs[k] = yk 218 errors[k] = ek 219 220 eabs: float = float(np.abs(ek)) 221 222 if eabs > self.gamma_bar: 223 self.n_updates += 1 224 update_mask[k] = True 225 226 mu: float = float(1.0 - (self.gamma_bar / eabs)) 227 228 norm_sq: float = float(np.real(np.dot(self.regressor.conj(), self.regressor))) 229 prev_norm_sq: float = 
float(np.real(np.dot(self.regressor_prev.conj(), self.regressor_prev))) 230 cross_term: complex = complex(np.dot(self.regressor_prev.conj(), self.regressor)) 231 232 den: float = float(self.gamma + (norm_sq * prev_norm_sq) - (np.abs(cross_term) ** 2)) 233 234 if den <= 0.0: 235 den = float(self.gamma + 1e-30) 236 237 lambda1: complex = complex((mu * ek * prev_norm_sq) / den) 238 lambda2: complex = complex(-(mu * ek * np.conj(cross_term)) / den) 239 240 self.w = self.w + (np.conj(lambda1) * self.regressor) + (np.conj(lambda2) * self.regressor_prev) 241 242 if return_internal_states: 243 if mu_track is not None: 244 mu_track[k] = mu 245 if den_track is not None: 246 den_track[k] = den 247 if lam1_track is not None: 248 lam1_track[k] = lambda1 249 if lam2_track is not None: 250 lam2_track[k] = lambda2 251 else: 252 if return_internal_states and mu_track is not None: 253 mu_track[k] = 0.0 254 255 self._record_history() 256 257 runtime_s: float = float(time() - tic) 258 if verbose: 259 print(f"[SM-BNLMS] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.03f} ms") 260 261 extra: Dict[str, Any] = { 262 "n_updates": int(self.n_updates), 263 "update_mask": update_mask, 264 } 265 if return_internal_states: 266 extra.update( 267 { 268 "mu": mu_track, 269 "den": den_track, 270 "lambda1": lam1_track, 271 "lambda2": lam2_track, 272 } 273 ) 274 275 return self._pack_results( 276 outputs=outputs, 277 errors=errors, 278 runtime_s=runtime_s, 279 error_type="a_priori", 280 extra=extra, 281 )
Executes the SM-BNLMS adaptation over paired sequences x[k] and d[k].
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (flattened internally).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (flattened internally).
verbose : bool, optional
If True, prints runtime and update count after completion.
return_internal_states : bool, optional
If True, includes internal trajectories in result.extra:
mu, den, lambda1, lambda2 (each length N). Entries
are zero when no update occurs.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
A priori output sequence y[k] = w^H[k] x_k.
- errors : ndarray of complex, shape (N,)
A priori error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict
Always present with:
- "n_updates" : int
Number of coefficient updates (iterations where |e[k]| > gamma_bar).
- "update_mask" : ndarray of bool, shape (N,)
Boolean mask indicating which iterations performed updates.
Additionally present only if return_internal_states=True:
- "mu" : ndarray of float, shape (N,)
Step factor mu[k] (0 when no update).
- "den" : ndarray of float, shape (N,)
Denominator used in lambda1/lambda2 (0 when no update).
- "lambda1" : ndarray of complex, shape (N,)
lambda1[k] (0 when no update).
- "lambda2" : ndarray of complex, shape (N,)
lambda2[k] (0 when no update).
class SMAffineProjection(AdaptiveFilter):
    """
    Set-Membership Affine-Projection (SM-AP) adaptive filter (complex-valued).

    Supervised affine-projection algorithm with set-membership updating,
    following Diniz (Alg. 6.2). The coefficients are touched only when the
    magnitude of the newest a priori error exceeds the bound ``gamma_bar``;
    each update enforces the target a posteriori error vector supplied
    through ``gamma_bar_vector`` by solving a regularized
    ``(L+1) x (L+1)`` linear system.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``; the filter has ``M + 1`` coefficients.
    gamma_bar : float
        Set-membership bound on the newest a priori error magnitude. An
        update is performed only if ``|e[k]| > gamma_bar``.
    gamma_bar_vector : array_like of complex
        Target a posteriori error vector of shape ``(L + 1,)``; stored
        internally as a column vector.
    gamma : float
        Regularization (diagonal loading) added to the affine-projection
        normal equations for numerical stability.
    L : int
        Data-reuse factor (projection order); the AP block size is ``L + 1``.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` of shape ``(M + 1,)``; defaults
        to zeros when None.

    Notes
    -----
    At iteration ``k`` the regressor block ``X(k)`` (shape ``(M+1, L+1)``,
    newest regressor in column 0) yields the conjugate-domain AP output
    ``y_ap = X^H w`` and error ``e_ap = d_ap^* - y_ap``; the reported scalar
    output/error are their first components. When ``|e[k]| > gamma_bar`` the
    update solves ``(X^H X + gamma I) s = e_ap - gamma_bar_vector^*`` and
    applies ``w <- w + X s``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 6.2.
    """

    supports_complex: bool = True

    gamma_bar: float
    gamma_bar_vector: np.ndarray
    gamma: float
    L: int
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        gamma_bar: float,
        gamma_bar_vector: Union[np.ndarray, list],
        gamma: float,
        L: int,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.gamma_bar = float(gamma_bar)
        self.gamma = float(gamma)
        self.L = int(L)
        self.n_coeffs = int(self.filter_order + 1)

        gvec = np.asarray(gamma_bar_vector, dtype=complex).ravel()
        if gvec.size != (self.L + 1):
            raise ValueError(
                f"gamma_bar_vector must have size L+1 = {self.L + 1}, got {gvec.size}"
            )
        # Column form so it subtracts directly from the (L+1, 1) AP error vector.
        self.gamma_bar_vector = gvec.reshape(-1, 1)

        # Sliding regressor block X[k]; newest regressor occupies column 0.
        self.regressor_matrix = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)

        self.n_updates: int = 0

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the SM-AP adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` of shape ``(N,)`` (flattened internally).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` of shape ``(N,)`` (flattened internally).
        verbose : bool, optional
            If True, prints total runtime and the number of performed updates.
        return_internal_states : bool, optional
            If True, ``result.extra["errors_vector"]`` holds the full AP
            a priori error-vector trajectory, shape ``(N, L + 1)``.

        Returns
        -------
        OptimizationResult
            ``outputs``/``errors`` are the scalar (newest-component) a priori
            output and error sequences; ``error_type`` is ``"a_priori"``;
            ``extra`` always carries ``"n_updates"`` and ``"update_mask"``.
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d.size)
        n_coeffs = int(self.n_coeffs)
        Lp1 = int(self.L + 1)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)
        update_mask = np.zeros(n_samples, dtype=bool)

        errors_vec_track: Optional[np.ndarray] = (
            np.zeros((n_samples, Lp1), dtype=complex) if return_internal_states else None
        )

        self.n_updates = 0
        w_current = self.w.astype(complex, copy=False).reshape(-1, 1)

        # Zero-padded histories: n_coeffs-1 input zeros cover the regressor
        # taps; L desired zeros cover the stacked desired vector.
        prefixed_input = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
        prefixed_desired = np.concatenate([np.zeros(self.L, dtype=complex), d])

        for k in range(n_samples):
            # Shift the AP block right and rebuild the newest regressor column.
            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
            start_idx = k + n_coeffs - 1
            stop = (k - 1) if k > 0 else None
            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]

            # Conjugate-domain AP output and error vectors (newest at index 0).
            y_ap = self.regressor_matrix.conj().T @ w_current
            d_ap = prefixed_desired[k + self.L : stop : -1].conj().reshape(-1, 1)
            e_ap = d_ap - y_ap

            outputs[k] = y_ap[0, 0]
            errors[k] = e_ap[0, 0]
            if return_internal_states and errors_vec_track is not None:
                errors_vec_track[k, :] = e_ap.ravel()

            if np.abs(e_ap[0, 0]) > self.gamma_bar:
                self.n_updates += 1
                update_mask[k] = True

                # Regularized AP normal equations:
                # (X^H X + gamma I) s = e_ap - gamma_bar_vector^*.
                R = self.regressor_matrix.conj().T @ self.regressor_matrix + self.gamma * np.eye(Lp1)
                rhs = e_ap - self.gamma_bar_vector.conj()
                try:
                    step = np.linalg.solve(R, rhs)
                except np.linalg.LinAlgError:
                    # Singular/ill-conditioned system: pseudoinverse fallback.
                    step = np.linalg.pinv(R) @ rhs

                w_current = w_current + self.regressor_matrix @ step

            # Per-iteration coefficient snapshot for the base-class history.
            self.w = w_current.ravel()
            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[SM-AP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "n_updates": int(self.n_updates),
            "update_mask": update_mask,
        }
        if return_internal_states:
            extra["errors_vector"] = errors_vec_track

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Set-Membership Affine-Projection (SM-AP) adaptive filter (complex-valued).
Supervised affine-projection algorithm with set-membership updating,
following Diniz (Alg. 6.2). Coefficients are updated only when the
magnitude of the most-recent a priori error exceeds a prescribed bound
gamma_bar. When an update occurs, the algorithm enforces a target
a posteriori error vector (provided by gamma_bar_vector).
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M + 1.
gamma_bar : float
Set-membership bound for the (most recent) a priori error magnitude.
An update is performed only if |e[k]| > gamma_bar.
gamma_bar_vector : array_like of complex
Target a posteriori error vector with shape (L + 1,) (stored
internally as a column vector). This is algorithm-dependent and
corresponds to the desired post-update constraint in Alg. 6.2.
gamma : float
Regularization factor gamma used in the affine-projection normal
equations to improve numerical stability.
L : int
Data reuse factor (projection order). The affine-projection block size is
P = L + 1.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
At iteration k, form the regressor block matrix:
X(k) ∈ C^{(M+1) x (L+1)}, whose columns are the most recent regressor vectors (newest in column 0).
The affine-projection output vector is:
$$y_{ap}(k) = X^H(k) w(k) \in \mathbb{C}^{L+1}.$$
Let the stacked desired vector be:
$$d_{ap}(k) \in \mathbb{C}^{L+1},$$
with newest sample at index 0. The a priori error vector is:
$$e_{ap}(k) = d_{ap}(k) - y_{ap}(k).$$
This implementation uses the most recent scalar component as the reported output and error:
$$y[k] = y_{ap}(k)[0], \qquad e[k] = e_{ap}(k)[0].$$
Set-membership update rule: update only if
$$|e[k]| > \bar{\gamma}.$$
When updating, solve the regularized system:
$$(X^H(k)X(k) + \gamma I_{L+1})\, s(k) =
\bigl(e_{ap}(k) - \bar{\gamma}_{vec}^*(k)\bigr),$$
and update the coefficients as:
$$w(k+1) = w(k) + X(k)\, s(k).$$
Here $\bar{\gamma}_{vec}$ is provided by gamma_bar_vector (stored
as a column vector); complex conjugation is applied to match the internal
conjugate-domain formulation used in the implementation.
References
119 def __init__( 120 self, 121 filter_order: int, 122 gamma_bar: float, 123 gamma_bar_vector: Union[np.ndarray, list], 124 gamma: float, 125 L: int, 126 w_init: Optional[Union[np.ndarray, list]] = None, 127 ) -> None: 128 super().__init__(filter_order=filter_order, w_init=w_init) 129 130 self.gamma_bar = float(gamma_bar) 131 self.gamma = float(gamma) 132 self.L = int(L) 133 134 self.n_coeffs = int(self.filter_order + 1) 135 136 gvec = np.asarray(gamma_bar_vector, dtype=complex).ravel() 137 if gvec.size != (self.L + 1): 138 raise ValueError( 139 f"gamma_bar_vector must have size L+1 = {self.L + 1}, got {gvec.size}" 140 ) 141 self.gamma_bar_vector = gvec.reshape(-1, 1) 142 143 self.regressor_matrix = np.zeros((self.n_coeffs, self.L + 1), dtype=complex) 144 145 self.n_updates: int = 0
147 @validate_input 148 def optimize( 149 self, 150 input_signal: np.ndarray, 151 desired_signal: np.ndarray, 152 verbose: bool = False, 153 return_internal_states: bool = False, 154 ) -> OptimizationResult: 155 """ 156 Executes the SM-AP adaptation loop over paired input/desired sequences. 157 158 Parameters 159 ---------- 160 input_signal : array_like of complex 161 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 162 desired_signal : array_like of complex 163 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 164 verbose : bool, optional 165 If True, prints total runtime and update count after completion. 166 return_internal_states : bool, optional 167 If True, includes the full a priori AP error-vector trajectory in 168 ``result.extra`` as ``"errors_vector"`` with shape ``(N, L + 1)``. 169 170 Returns 171 ------- 172 OptimizationResult 173 Result object with fields: 174 - outputs : ndarray of complex, shape ``(N,)`` 175 Scalar a priori output sequence, ``y[k] = y_{ap}(k)[0]``. 176 - errors : ndarray of complex, shape ``(N,)`` 177 Scalar a priori error sequence, ``e[k] = e_{ap}(k)[0]``. 178 - coefficients : ndarray of complex 179 Coefficient history recorded by the base class. 180 - error_type : str 181 Set to ``"a_priori"``. 182 - extra : dict 183 Always present with: 184 - ``"n_updates"`` : int 185 Number of coefficient updates (iterations where ``|e[k]| > gamma_bar``). 186 - ``"update_mask"`` : ndarray of bool, shape ``(N,)`` 187 Boolean mask indicating which iterations performed updates. 188 Additionally present only if ``return_internal_states=True``: 189 - ``"errors_vector"`` : ndarray of complex, shape ``(N, L + 1)`` 190 Full affine-projection a priori error vectors over time. 
191 """ 192 tic: float = time() 193 194 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 195 d: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel() 196 197 n_samples: int = int(d.size) 198 n_coeffs: int = int(self.n_coeffs) 199 Lp1: int = int(self.L + 1) 200 201 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 202 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 203 update_mask: np.ndarray = np.zeros(n_samples, dtype=bool) 204 205 errors_vec_track: Optional[np.ndarray] = ( 206 np.zeros((n_samples, Lp1), dtype=complex) if return_internal_states else None 207 ) 208 209 self.n_updates = 0 210 w_current: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1) 211 212 prefixed_input: np.ndarray = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x]) 213 prefixed_desired: np.ndarray = np.concatenate([np.zeros(self.L, dtype=complex), d]) 214 215 for k in range(n_samples): 216 self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1] 217 218 start_idx = k + n_coeffs - 1 219 stop = (k - 1) if (k > 0) else None 220 self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1] 221 222 output_ap_conj = (self.regressor_matrix.conj().T) @ w_current 223 224 desired_slice = prefixed_desired[k + self.L : stop : -1] 225 error_ap_conj = desired_slice.conj().reshape(-1, 1) - output_ap_conj 226 227 yk = output_ap_conj[0, 0] 228 ek = error_ap_conj[0, 0] 229 230 outputs[k] = yk 231 errors[k] = ek 232 if return_internal_states and errors_vec_track is not None: 233 errors_vec_track[k, :] = error_ap_conj.ravel() 234 235 if np.abs(ek) > self.gamma_bar: 236 self.n_updates += 1 237 update_mask[k] = True 238 239 R = (self.regressor_matrix.conj().T @ self.regressor_matrix) + self.gamma * np.eye(Lp1) 240 b = error_ap_conj - self.gamma_bar_vector.conj() 241 242 try: 243 step = np.linalg.solve(R, b) 244 except np.linalg.LinAlgError: 245 step = np.linalg.pinv(R) @ b 246 247 w_current = w_current + (self.regressor_matrix @ step) 248 249 self.w 
= w_current.ravel() 250 self._record_history() 251 252 runtime_s: float = float(time() - tic) 253 if verbose: 254 print(f"[SM-AP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.02f} ms") 255 256 extra: Dict[str, Any] = { 257 "n_updates": int(self.n_updates), 258 "update_mask": update_mask, 259 } 260 if return_internal_states: 261 extra["errors_vector"] = errors_vec_track 262 263 return self._pack_results( 264 outputs=outputs, 265 errors=errors, 266 runtime_s=runtime_s, 267 error_type="a_priori", 268 extra=extra, 269 )
Executes the SM-AP adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints total runtime and update count after completion.
return_internal_states : bool, optional
If True, includes the full a priori AP error-vector trajectory in
result.extra as "errors_vector" with shape (N, L + 1).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar a priori output sequence, y[k] = y_{ap}(k)[0].
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = e_{ap}(k)[0].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict
Always present with:
- "n_updates" : int
Number of coefficient updates (iterations where |e[k]| > gamma_bar).
- "update_mask" : ndarray of bool, shape (N,)
Boolean mask indicating which iterations performed updates.
Additionally present only if return_internal_states=True:
- "errors_vector" : ndarray of complex, shape (N, L + 1)
Full affine-projection a priori error vectors over time.
class SimplifiedSMPUAP(AdaptiveFilter):
    """
    Simplified Set-Membership Partial-Update Affine-Projection (SM-Simp-PUAP)
    adaptive filter (complex-valued).

    Set-membership affine-projection filter with partial coefficient updates,
    following Diniz (Alg. 6.6). Each iteration forms an AP a priori error
    vector from a sliding regressor matrix; an update fires only when the
    magnitude of the first error component exceeds ``gamma_bar``, and only
    the coefficients selected by the corresponding ``up_selector`` column
    are modified.

    Parameters
    ----------
    filter_order : int
        FIR filter order (taps minus 1); the filter has
        ``M + 1 = filter_order + 1`` coefficients.
    gamma_bar : float
        Error-magnitude threshold; an update occurs when ``|e[k]| > gamma_bar``
        where ``e[k]`` is the first AP a priori error component.
    gamma : float
        Diagonal-loading regularization added to the AP correlation matrix.
    L : int
        Projection order / data-reuse factor; AP vectors have length ``L + 1``
        and the regressor matrix has shape ``(M+1, L+1)``.
    up_selector : array_like of {0, 1}
        Partial-update selector of shape ``(M+1, N)``; column ``k`` picks the
        coefficients updated at iteration ``k``. Must provide at least ``N``
        columns for an ``N``-sample run.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` of shape ``(M+1,)``; zeros when
        None (via the base class).

    Notes
    -----
    With selector column ``c[k]`` the selected regressor block is
    ``C_X[k] = diag(c[k]) X[k]`` (implemented as an element-wise product).
    The update solves ``(X^H[k] C_X[k] + gamma I) s = mu[k] e[k] u_1`` with
    ``u_1 = [1, 0, ..., 0]^T`` targeting the first error component, then
    applies ``w <- w + C_X[k] s``. The step factor is
    ``mu[k] = 1 - gamma_bar / |e[k]|`` when ``|e[k]| > gamma_bar``, else 0.
    ``np.linalg.solve`` is used for the linear system with a pseudoinverse
    fallback; coefficient history is recorded every iteration.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 6.6.
    """

    supports_complex: bool = True

    gamma_bar: float
    gamma: float
    L: int
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        gamma_bar: float,
        gamma: float,
        L: int,
        up_selector: Union[np.ndarray, list],
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """
        Validate the partial-update selector and initialize the filter state.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.gamma_bar = float(gamma_bar)
        self.gamma = float(gamma)
        self.L = int(L)
        self.n_coeffs = int(self.filter_order + 1)

        sel = np.asarray(up_selector)
        if sel.ndim != 2:
            raise ValueError("up_selector must be a 2D array with shape (M+1, N).")
        if sel.shape[0] != self.n_coeffs:
            raise ValueError(
                f"up_selector must have shape (M+1, N) with M+1={self.n_coeffs}, got {sel.shape}."
            )
        self.up_selector: np.ndarray = sel

        # Sliding regressor block X[k]; newest regressor occupies column 0.
        self.regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)

        self.n_updates: int = 0

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the SM-Simp-PUAP adaptation loop over paired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` of shape ``(N,)`` (flattened internally).
        desired_signal : array_like of complex
            Desired sequence ``d[k]`` of shape ``(N,)`` (flattened internally).
        verbose : bool, optional
            If True, prints total runtime and the number of performed updates.
        return_internal_states : bool, optional
            If True, adds ``"mu"`` and ``"selected_count"`` trajectories to
            ``result.extra``.

        Returns
        -------
        OptimizationResult
            Scalar a priori outputs/errors (first AP component), coefficient
            history from the base class, ``error_type="a_priori"``, and an
            ``extra`` dict always carrying ``"n_updates"`` and ``"update_mask"``.
        """
        tic: float = perf_counter()

        x = np.asarray(input_signal, dtype=complex).ravel()
        d = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d.size)
        n_coeffs = int(self.n_coeffs)
        Lp1 = int(self.L + 1)

        if self.up_selector.shape[1] < n_samples:
            raise ValueError(
                f"up_selector has {self.up_selector.shape[1]} columns, but signal has {n_samples} samples."
            )

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)
        update_mask = np.zeros(n_samples, dtype=bool)

        mu_track = np.zeros(n_samples, dtype=float) if return_internal_states else None
        selcnt_track = np.zeros(n_samples, dtype=int) if return_internal_states else None

        self.n_updates = 0
        w_current = self.w.astype(complex, copy=False).reshape(-1, 1)

        # Zero-padded histories for the regressor taps and desired stack.
        prefixed_input = np.concatenate([np.zeros(n_coeffs - 1, dtype=complex), x])
        prefixed_desired = np.concatenate([np.zeros(self.L, dtype=complex), d])

        # AP unit vector targeting the newest (first) error component.
        u1 = np.zeros((Lp1, 1), dtype=complex)
        u1[0, 0] = 1.0

        for k in range(n_samples):
            # Slide the regressor block and insert the newest regressor.
            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
            start_idx = k + n_coeffs - 1
            stop = (k - 1) if k > 0 else None
            self.regressor_matrix[:, 0] = prefixed_input[start_idx:stop:-1]

            # Conjugate-domain AP output and error vectors (newest at index 0).
            y_ap = self.regressor_matrix.conj().T @ w_current
            d_ap = prefixed_desired[k + self.L : stop : -1].conj().reshape(-1, 1)
            e_ap = d_ap - y_ap

            ek = complex(e_ap[0, 0])
            outputs[k] = complex(y_ap[0, 0])
            errors[k] = ek

            # Set-membership gate: mu = 1 - gamma_bar/|e| when above the bound.
            eabs = float(np.abs(ek))
            if eabs > self.gamma_bar:
                self.n_updates += 1
                update_mask[k] = True
                mu = float(1.0 - (self.gamma_bar / eabs))
            else:
                mu = 0.0

            c_vec = self.up_selector[:, k].reshape(-1, 1).astype(float)
            if selcnt_track is not None:
                selcnt_track[k] = int(np.sum(c_vec != 0))

            if mu > 0.0:
                # Element-wise selection implements diag(c[k]) X[k].
                C_reg = c_vec * self.regressor_matrix  # (M+1, L+1)
                R = (self.regressor_matrix.conj().T @ C_reg) + self.gamma * np.eye(Lp1)
                rhs = mu * ek * u1  # (L+1, 1)
                try:
                    inv_term = np.linalg.solve(R, rhs)
                except np.linalg.LinAlgError:
                    inv_term = np.linalg.pinv(R) @ rhs
                w_current = w_current + (C_reg @ inv_term)

            if mu_track is not None:
                mu_track[k] = mu

            # Coefficient history is recorded by the base class every iteration.
            self.w = w_current.ravel()
            self._record_history()

        runtime_s: float = perf_counter() - tic
        if verbose:
            print(f"[SM-Simp-PUAP] Updates: {self.n_updates}/{n_samples} | Runtime: {runtime_s * 1000:.2f} ms")

        extra: Dict[str, Any] = {
            "n_updates": int(self.n_updates),
            "update_mask": update_mask,
        }
        if return_internal_states:
            extra.update(
                {
                    "mu": mu_track,
                    "selected_count": selcnt_track,
                }
            )

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Simplified Set-membership Partial-Update Affine-Projection (SM-Simp-PUAP) adaptive filter (complex-valued).
Set-membership affine-projection adaptive filter with partial updates, following Diniz (Alg. 6.6). At each iteration, the algorithm forms an affine-projection (AP) a priori error vector from a sliding regressor matrix. An update is performed only when the magnitude of the first a priori error component exceeds a prescribed bound (set-membership condition). When an update occurs, only a subset of coefficients is updated according to a user-provided selector mask.
Parameters
filter_order : int
FIR filter order (number of taps minus 1). The number of coefficients is
M+1 = filter_order + 1.
gamma_bar : float
Error magnitude threshold for triggering an update. An update is performed when
|e[k]| > gamma_bar where e[k] is the first AP a priori error component.
gamma : float
Regularization factor added to the AP correlation matrix (diagonal loading).
Must typically be positive to improve numerical stability.
L : int
Projection order / reuse data factor. The AP vectors have length L+1 and
the regressor matrix has shape (M+1, L+1).
up_selector : array_like of {0,1}
Partial-update selector matrix with shape (M+1, N). Column k (a vector
of length M+1) selects which coefficients are updated at iteration k.
Non-selected coefficients remain unchanged in that iteration.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M+1,). If None, initializes
with zeros (via the base class).
Notes
Complex-valued
This implementation supports complex-valued signals and coefficients
(supports_complex=True).
Regressor matrix and AP vectors (as implemented)
Let M+1 be the number of coefficients and L+1 the projection length.
The regressor matrix X[k] is built by stacking the most recent tapped-delay
input vectors:
$$X[k] = [x_k, x_{k-1}, \dots, x_{k-L}] \in \mathbb{C}^{(M+1)\times(L+1)},$$
where each column is an ``(M+1)``-length FIR regressor built from the input signal.
The AP a priori output vector and error vector (conjugated form) are computed as:
$$y^*[k] = X^H[k] w[k-1], \qquad
e^*[k] = d^*[k] - y^*[k],$$
producing vectors in $\mathbb{C}^{L+1}$. This implementation returns only
the first component as the scalar output/error:
$$y[k] = y^*[k]_0, \qquad e[k] = e^*[k]_0.$$
Set-membership update gate (as implemented)
The update step-size mu[k] is defined by:
$$\mu[k] =
\begin{cases} 1 - \frac{\bar\gamma}{|e[k]|}, & |e[k]| > \bar\gamma \\ 0, & \text{otherwise} \end{cases}$$
where ``bar_gamma = gamma_bar``.
Partial-update mechanism (as implemented)
Let c[k] be the selector column (shape (M+1,1)). The selected regressor
matrix is formed by element-wise selection:
$$C_X[k] = \operatorname{diag}(c[k])\,X[k],$$
implemented as ``C_reg = c_vec * regressor_matrix``. The regularized correlation
matrix is
$$R[k] = X^H[k] C_X[k] + \gamma I,$$
and the coefficient update uses the AP unit vector ``u_1 = [1, 0, ..., 0]^T`` to
target the first error component:
$$w[k] = w[k-1] + C_X[k] R^{-1}[k] (\mu[k] e[k] u_1).$$
Implementation details
- up_selector must provide at least N columns for an N-sample run.
- np.linalg.solve is used for the linear system; if singular/ill-conditioned,
a pseudoinverse fallback is used.
- Coefficient history is recorded by the base class at every iteration.
References
136 def __init__( 137 self, 138 filter_order: int, 139 gamma_bar: float, 140 gamma: float, 141 L: int, 142 up_selector: Union[np.ndarray, list], 143 w_init: Optional[Union[np.ndarray, list]] = None, 144 ) -> None: 145 """ 146 Parameters 147 ---------- 148 filter_order: 149 FIR filter order (number of taps - 1). Number of coefficients is filter_order + 1. 150 gamma_bar: 151 Error magnitude threshold for triggering updates. 152 gamma: 153 Regularization factor for the AP correlation matrix. 154 L: 155 Reuse data factor / constraint length (projection order). 156 up_selector: 157 Partial-update selector matrix with shape (M+1, N), entries in {0,1}. 158 Each column selects which coefficients are updated at iteration k. 159 w_init: 160 Optional initial coefficient vector. If None, initializes to zeros. 161 """ 162 super().__init__(filter_order=filter_order, w_init=w_init) 163 164 self.gamma_bar = float(gamma_bar) 165 self.gamma = float(gamma) 166 self.L = int(L) 167 self.n_coeffs = int(self.filter_order + 1) 168 169 sel = np.asarray(up_selector) 170 if sel.ndim != 2: 171 raise ValueError("up_selector must be a 2D array with shape (M+1, N).") 172 if sel.shape[0] != self.n_coeffs: 173 raise ValueError( 174 f"up_selector must have shape (M+1, N) with M+1={self.n_coeffs}, got {sel.shape}." 175 ) 176 self.up_selector: np.ndarray = sel 177 178 self.regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, self.L + 1), dtype=complex) 179 180 self.n_updates: int = 0
Parameters
filter_order : FIR filter order (number of taps - 1); the number of coefficients is filter_order + 1.
gamma_bar : Error-magnitude threshold for triggering updates.
gamma : Regularization factor for the AP correlation matrix.
L : Reuse data factor / constraint length (projection order).
up_selector : Partial-update selector matrix with shape (M+1, N), entries in {0, 1}; each column selects which coefficients are updated at iteration k.
w_init : Optional initial coefficient vector. If None, initializes to zeros.
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run the SM-Simp-PUAP adaptation loop over paired input/desired sequences.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally).
    desired_signal : array_like of complex
        Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally).
    verbose : bool, optional
        If True, prints the total runtime and the number of performed updates.
    return_internal_states : bool, optional
        If True, also stores the ``"mu"`` and ``"selected_count"``
        trajectories in ``result.extra``.

    Returns
    -------
    OptimizationResult
        ``outputs`` / ``errors`` are the scalar a priori sequences (first
        component of the AP vectors), ``coefficients`` is the history
        recorded by the base class, ``error_type`` is ``"a_priori"``, and
        ``extra`` always carries ``"n_updates"`` and ``"update_mask"``.

    Raises
    ------
    ValueError
        If ``up_selector`` has fewer columns than there are samples.
    """
    t_start: float = perf_counter()

    x_seq: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
    d_seq: np.ndarray = np.asarray(desired_signal, dtype=complex).ravel()

    num_samples: int = int(d_seq.size)
    num_taps: int = int(self.n_coeffs)
    ap_dim: int = int(self.L + 1)

    if self.up_selector.shape[1] < num_samples:
        raise ValueError(
            f"up_selector has {self.up_selector.shape[1]} columns, but signal has {num_samples} samples."
        )

    out_seq: np.ndarray = np.zeros(num_samples, dtype=complex)
    err_seq: np.ndarray = np.zeros(num_samples, dtype=complex)
    did_update: np.ndarray = np.zeros(num_samples, dtype=bool)

    mu_hist: Optional[np.ndarray] = np.zeros(num_samples, dtype=float) if return_internal_states else None
    sel_hist: Optional[np.ndarray] = np.zeros(num_samples, dtype=int) if return_internal_states else None

    self.n_updates = 0
    weights: np.ndarray = self.w.astype(complex, copy=False).reshape(-1, 1)

    # Zero-prefix both signals so early iterations see an all-zero history.
    padded_x: np.ndarray = np.concatenate([np.zeros(num_taps - 1, dtype=complex), x_seq])
    padded_d: np.ndarray = np.concatenate([np.zeros(self.L, dtype=complex), d_seq])

    # AP unit vector u_1 = [1, 0, ..., 0]^T targeting the first error component.
    unit_vec: np.ndarray = np.zeros((ap_dim, 1), dtype=complex)
    unit_vec[0, 0] = 1.0

    for k in range(num_samples):
        # Shift the AP regressor matrix one column to the right and insert
        # the newest tapped-delay regressor in column 0 (reversed slice).
        self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
        first = k + num_taps - 1
        last = (k - 1) if k > 0 else None
        self.regressor_matrix[:, 0] = padded_x[first:last:-1]

        # AP output / error vectors (conjugate convention of this library).
        y_vec: np.ndarray = self.regressor_matrix.conj().T @ weights
        d_vec: np.ndarray = padded_d[k + self.L : last : -1].conj().reshape(-1, 1)
        e_vec: np.ndarray = d_vec - y_vec

        y_k: complex = complex(y_vec[0, 0])
        e_k: complex = complex(e_vec[0, 0])
        out_seq[k] = y_k
        err_seq[k] = e_k

        err_mag: float = float(np.abs(e_k))
        if err_mag > self.gamma_bar:
            self.n_updates += 1
            did_update[k] = True
            mu: float = float(1.0 - (self.gamma_bar / err_mag))
        else:
            mu = 0.0

        c_col: np.ndarray = self.up_selector[:, k].reshape(-1, 1).astype(float)
        if sel_hist is not None:
            sel_hist[k] = int(np.sum(c_col != 0))

        if mu > 0.0:
            # Row-masked regressor matrix (partial update).
            selected: np.ndarray = c_col * self.regressor_matrix  # (M+1, L+1)

            # Regularized AP correlation matrix.
            corr: np.ndarray = (self.regressor_matrix.conj().T @ selected) + self.gamma * np.eye(ap_dim)

            rhs: np.ndarray = mu * e_k * unit_vec  # (L+1, 1)

            try:
                solved = np.linalg.solve(corr, rhs)
            except np.linalg.LinAlgError:
                # Pseudoinverse fallback for singular/ill-conditioned systems.
                solved = np.linalg.pinv(corr) @ rhs

            weights = weights + (selected @ solved)

        if mu_hist is not None:
            mu_hist[k] = mu

    self.w = weights.ravel()
    self._record_history()

    runtime_s: float = perf_counter() - t_start
    if verbose:
        print(f"[SM-Simp-PUAP] Updates: {self.n_updates}/{num_samples} | Runtime: {runtime_s * 1000:.2f} ms")

    extra: Dict[str, Any] = {
        "n_updates": int(self.n_updates),
        "update_mask": did_update,
    }
    if return_internal_states:
        extra["mu"] = mu_hist
        extra["selected_count"] = sel_hist

    return self._pack_results(
        outputs=out_seq,
        errors=err_seq,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the SM-Simp-PUAP adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime and the number of performed updates.
return_internal_states : bool, optional
If True, includes internal trajectories in result.extra:
"mu" and "selected_count" in addition to the always-present
set-membership bookkeeping fields.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar a priori output sequence (first component of the AP output vector).
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence (first component of the AP error vector).
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict
Always includes "n_updates" and "update_mask". If
return_internal_states=True, also includes "mu" and
"selected_count".
class SimplifiedSMAP(AdaptiveFilter):
    """
    Simplified Set-Membership Affine Projection (SM-Simp-AP) adaptive filter
    (complex-valued).

    Implements Algorithm 6.3 (Diniz). An AP-style regressor matrix with
    ``L + 1`` columns is maintained, but the coefficient update uses only the
    most recent column (the current regressor vector). Updates occur only
    when the a priori error magnitude exceeds ``gamma_bar``.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M`` (number of coefficients is ``M + 1``).
    gamma_bar : float
        Set-membership bound for the a priori error magnitude; an update
        occurs only if ``|e[k]| > gamma_bar``.
    gamma : float
        Regularization constant in the normalization denominator
        ``gamma + ||x_k||^2``.
    L : int
        Reuse data factor / constraint length. It only sizes the internal
        AP-style regressor matrix ``(M+1) x (L+1)``; only the first column
        enters the update.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)``, shape ``(M + 1,)``. If None, zeros.

    Notes
    -----
    With the tapped-delay regressor ``x_k = [x[k], ..., x[k-M]]^T`` this
    implementation computes ``y[k] = x_k^H w[k]`` and stores the error
    ``e[k] = conj(d[k]) - y[k]`` (note the conjugation on ``d[k]``; many
    texts use ``e[k] = d[k] - w^H x_k`` instead). When ``|e[k]| > gamma_bar``
    the normalized, simplified-AP update is::

        w[k+1] = w[k] + ((1 - gamma_bar/|e[k]|) * e[k] / (gamma + ||x_k||^2)) * x_k

    Returned sequences are a priori quantities (``error_type="a_priori"``).

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, Algorithm 6.3.
    """

    supports_complex: bool = True
    gamma_bar: float
    gamma: float
    L: int
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        gamma_bar: float,
        gamma: float,
        L: int,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.gamma_bar = float(gamma_bar)
        self.gamma = float(gamma)
        self.L = int(L)
        self.n_coeffs = int(self.filter_order + 1)

        # AP-style regressor matrix; only column 0 enters the update.
        self.regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)
        # Alias kept for backward compatibility with external users.
        self.X_matrix = self.regressor_matrix

        # Counter of coefficient updates performed by the last optimize() run.
        self.n_updates: int = 0

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the SM-Simp-AP adaptation over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]``, shape ``(N,)`` (flattened internally).
        desired_signal : array_like of complex
            Desired sequence ``d[k]``, shape ``(N,)`` (flattened internally).
        verbose : bool, optional
            If True, prints runtime and update statistics after completion.
        return_internal_states : bool, optional
            If True, ``result.extra`` also carries the ``"step_factor"`` and
            ``"den"`` trajectories (zero at iterations with no update).

        Returns
        -------
        OptimizationResult
            ``outputs`` / ``errors`` are the a priori sequences
            (``e[k] = conj(d[k]) - y[k]``), ``coefficients`` is the history
            recorded by the base class, ``error_type`` is ``"a_priori"``, and
            ``extra`` always holds ``"n_updates"`` and ``"update_mask"``.
        """
        t_start: float = time()

        x_seq = np.asarray(input_signal, dtype=complex).ravel()
        d_seq = np.asarray(desired_signal, dtype=complex).ravel()

        num_samples = int(d_seq.size)
        num_taps = int(self.n_coeffs)

        out_seq = np.zeros(num_samples, dtype=complex)
        err_seq = np.zeros(num_samples, dtype=complex)
        did_update = np.zeros(num_samples, dtype=bool)

        step_hist: Optional[np.ndarray] = np.zeros(num_samples, dtype=complex) if return_internal_states else None
        den_hist: Optional[np.ndarray] = np.zeros(num_samples, dtype=float) if return_internal_states else None

        self.n_updates = 0
        weights = self.w.astype(complex, copy=False).reshape(-1, 1)

        # Zero-prefix so the first regressors see an all-zero history.
        padded_x = np.concatenate([np.zeros(num_taps - 1, dtype=complex), x_seq])

        for k in range(num_samples):
            # Shift the regressor matrix right; insert the newest regressor
            # in column 0 (reversed slice of the padded input).
            self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
            first = k + num_taps - 1
            last = (k - 1) if k > 0 else None
            self.regressor_matrix[:, 0] = padded_x[first:last:-1]

            x_k = self.regressor_matrix[:, 0:1]

            y_k: complex = complex((x_k.conj().T @ weights).item())
            e_k: complex = complex(np.conj(d_seq[k]) - y_k)

            out_seq[k] = y_k
            err_seq[k] = e_k

            err_mag = float(np.abs(e_k))

            # Defaults recorded when the set-membership bound is satisfied.
            step_k: complex = 0.0 + 0.0j
            den_k: float = 0.0

            if err_mag > self.gamma_bar:
                self.n_updates += 1
                did_update[k] = True

                step_k = complex((1.0 - (self.gamma_bar / err_mag)) * e_k)

                norm_sq = float(np.real((x_k.conj().T @ x_k).item()))
                den_k = float(self.gamma + norm_sq)
                if den_k <= 0.0:
                    # Guard against a pathological non-positive denominator.
                    den_k = float(self.gamma + 1e-30)

                weights = weights + (step_k / den_k) * x_k

            if step_hist is not None:
                step_hist[k] = step_k
            if den_hist is not None:
                den_hist[k] = den_k

        self.w = weights.ravel()
        self._record_history()

        runtime_s = float(time() - t_start)
        if verbose:
            print(f"[SM-Simp-AP] Updates: {self.n_updates}/{num_samples} | Runtime: {runtime_s * 1000:.2f} ms")

        extra: Dict[str, Any] = {
            "n_updates": int(self.n_updates),
            "update_mask": did_update,
        }
        if return_internal_states:
            extra["step_factor"] = step_hist
            extra["den"] = den_hist

        return self._pack_results(
            outputs=out_seq,
            errors=err_seq,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Simplified Set-Membership Affine Projection (SM-Simp-AP) adaptive filter (complex-valued).
Implements Algorithm 6.3 (Diniz). This is a simplified affine-projection
set-membership scheme where an AP-style regressor matrix of length L+1
is maintained, but the update uses only the most recent column (the
current regressor vector). Updates occur only when the a priori error
magnitude exceeds gamma_bar.
Parameters
filter_order : int
FIR filter order M (number of coefficients is M + 1).
gamma_bar : float
Set-membership bound \bar{\gamma} for the a priori error magnitude.
An update occurs only if |e[k]| > gamma_bar.
gamma : float
Regularization constant used in the normalization denominator
gamma + ||x_k||^2.
L : int
Reuse data factor / constraint length. In this simplified variant it
mainly determines the number of columns kept in the internal AP-style
regressor matrix (size (M+1) x (L+1)); only the first column is used
in the update.
w_init : array_like of complex, optional
Initial coefficient vector w(0), shape (M + 1,). If None, zeros.
Notes
Regressor definition The current tapped-delay regressor is
$$x_k = [x[k], x[k-1], \dots, x[k-M]]^T \in \mathbb{C}^{M+1}.$$
Internally, the algorithm maintains an AP regressor matrix
$$X_k = [x_k, x_{k-1}, \dots, x_{k-L}] \in \mathbb{C}^{(M+1)\times(L+1)},$$
but the update uses only the first column ``x_k``.
A priori output and error (as implemented) This implementation computes
$$y[k] = x_k^H w[k],$$
and stores it as ``outputs[k]``.
The stored error is
$$e[k] = d^*[k] - y[k].$$
(This matches the semantics of your code; many texts use
``e[k] = d[k] - w^H x_k``. If you want the textbook convention, you’d
remove the conjugation on ``d[k]`` and ensure ``y[k]=w^H x_k``.)
Set-membership condition
If $|e[k]| \le \bar{\gamma}$, no update is performed.
If ``|e[k]| > \bar{\gamma}``, define the scalar step factor
$$s[k] = \left(1 - \frac{\bar{\gamma}}{|e[k]|}\right) e[k].$$
Normalized update (simplified AP)
With $\mathrm{den}[k] = \gamma + \|x_k\|^2$, the coefficient update is
$$w[k+1] = w[k] + \frac{s[k]}{\mathrm{den}[k]} \, x_k.$$
Returned error type
The returned sequences correspond to a priori quantities (computed
before updating w), so error_type="a_priori".
References
def __init__(
    self,
    filter_order: int,
    gamma_bar: float,
    gamma: float,
    L: int,
    w_init: Optional[Union[np.ndarray, list]] = None,
) -> None:
    """
    Initialize the SM-Simp-AP filter state.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``; the filter has ``M + 1`` coefficients.
    gamma_bar : float
        Set-membership bound for the a priori error magnitude.
    gamma : float
        Regularization constant for the update denominator.
    L : int
        Constraint length sizing the internal ``(M+1) x (L+1)`` regressor matrix.
    w_init : array_like of complex, optional
        Initial coefficient vector; zeros when None.
    """
    super().__init__(filter_order=filter_order, w_init=w_init)

    self.gamma_bar = float(gamma_bar)
    self.gamma = float(gamma)
    self.L = int(L)
    self.n_coeffs = int(self.filter_order + 1)

    # AP-style regressor matrix; only column 0 enters the update.
    self.regressor_matrix: np.ndarray = np.zeros((self.n_coeffs, self.L + 1), dtype=complex)
    # Alias kept for backward compatibility with external users.
    self.X_matrix = self.regressor_matrix

    # Counter of coefficient updates performed by the last optimize() run.
    self.n_updates: int = 0
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run the SM-Simp-AP adaptation over paired sequences ``x[k]`` and ``d[k]``.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]``, shape ``(N,)`` (flattened internally).
    desired_signal : array_like of complex
        Desired sequence ``d[k]``, shape ``(N,)`` (flattened internally).
    verbose : bool, optional
        If True, prints runtime and update statistics after completion.
    return_internal_states : bool, optional
        If True, ``result.extra`` also carries the ``"step_factor"`` and
        ``"den"`` trajectories (zero at iterations with no update).

    Returns
    -------
    OptimizationResult
        ``outputs`` / ``errors`` are the a priori sequences
        (``e[k] = conj(d[k]) - y[k]``), ``coefficients`` is the history
        recorded by the base class, ``error_type`` is ``"a_priori"``, and
        ``extra`` always holds ``"n_updates"`` and ``"update_mask"``.
    """
    t_start: float = time()

    x_seq = np.asarray(input_signal, dtype=complex).ravel()
    d_seq = np.asarray(desired_signal, dtype=complex).ravel()

    num_samples = int(d_seq.size)
    num_taps = int(self.n_coeffs)

    out_seq = np.zeros(num_samples, dtype=complex)
    err_seq = np.zeros(num_samples, dtype=complex)
    did_update = np.zeros(num_samples, dtype=bool)

    step_hist: Optional[np.ndarray] = np.zeros(num_samples, dtype=complex) if return_internal_states else None
    den_hist: Optional[np.ndarray] = np.zeros(num_samples, dtype=float) if return_internal_states else None

    self.n_updates = 0
    weights = self.w.astype(complex, copy=False).reshape(-1, 1)

    # Zero-prefix so the first regressors see an all-zero history.
    padded_x = np.concatenate([np.zeros(num_taps - 1, dtype=complex), x_seq])

    for k in range(num_samples):
        # Shift the regressor matrix right; insert the newest regressor
        # in column 0 (reversed slice of the padded input).
        self.regressor_matrix[:, 1:] = self.regressor_matrix[:, :-1]
        first = k + num_taps - 1
        last = (k - 1) if k > 0 else None
        self.regressor_matrix[:, 0] = padded_x[first:last:-1]

        x_k = self.regressor_matrix[:, 0:1]

        y_k: complex = complex((x_k.conj().T @ weights).item())
        e_k: complex = complex(np.conj(d_seq[k]) - y_k)

        out_seq[k] = y_k
        err_seq[k] = e_k

        err_mag = float(np.abs(e_k))

        # Defaults recorded when the set-membership bound is satisfied.
        step_k: complex = 0.0 + 0.0j
        den_k: float = 0.0

        if err_mag > self.gamma_bar:
            self.n_updates += 1
            did_update[k] = True

            step_k = complex((1.0 - (self.gamma_bar / err_mag)) * e_k)

            norm_sq = float(np.real((x_k.conj().T @ x_k).item()))
            den_k = float(self.gamma + norm_sq)
            if den_k <= 0.0:
                # Guard against a pathological non-positive denominator.
                den_k = float(self.gamma + 1e-30)

            weights = weights + (step_k / den_k) * x_k

        if step_hist is not None:
            step_hist[k] = step_k
        if den_hist is not None:
            den_hist[k] = den_k

    self.w = weights.ravel()
    self._record_history()

    runtime_s = float(time() - t_start)
    if verbose:
        print(f"[SM-Simp-AP] Updates: {self.n_updates}/{num_samples} | Runtime: {runtime_s * 1000:.2f} ms")

    extra: Dict[str, Any] = {
        "n_updates": int(self.n_updates),
        "update_mask": did_update,
    }
    if return_internal_states:
        extra["step_factor"] = step_hist
        extra["den"] = den_hist

    return self._pack_results(
        outputs=out_seq,
        errors=err_seq,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the SM-Simp-AP adaptation.
Parameters
input_signal : array_like of complex
Input sequence x[k], shape (N,) (flattened internally).
desired_signal : array_like of complex
Desired sequence d[k], shape (N,) (flattened internally).
verbose : bool, optional
If True, prints runtime and update statistics after completion.
return_internal_states : bool, optional
If True, includes internal trajectories in result.extra:
step_factor and den (each length N). Entries are zero
when no update occurs.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
A priori output sequence.
- errors : ndarray of complex, shape (N,)
A priori error sequence (as in code: e[k] = conj(d[k]) - y[k]).
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict
Always present with:
- "n_updates" : int
Number of coefficient updates (iterations where |e[k]| > gamma_bar).
- "update_mask" : ndarray of bool, shape (N,)
Boolean mask indicating which iterations performed updates.
Additionally present only if return_internal_states=True:
- "step_factor" : ndarray of complex, shape (N,)
Scalar factor (1 - gamma_bar/|e|) * e (0 when no update).
- "den" : ndarray of float, shape (N,)
Denominator gamma + ||x_k||^2 (0 when no update).
class LRLSPosteriori(AdaptiveFilter):
    """
    Lattice RLS using a posteriori errors (LRLS, a posteriori form), complex-valued.

    Implements Diniz (Algorithm 7.1) as a lattice/ladder structure:

    1. **Lattice prediction stage** (order ``M``): updates forward/backward
       a posteriori prediction errors and energy terms with exponentially
       weighted recursions.
    2. **Ladder (joint-process) stage** (length ``M+1``): updates the ladder
       coefficients ``v`` and produces the a posteriori output error by
       progressively removing the backward-error components from ``d[k]``.

    Ladder coefficients live in ``self.v`` (length ``M+1``); ``self.w``
    mirrors ``self.v`` at each iteration so the base-class history records
    the ladder coefficient trajectory.

    Parameters
    ----------
    filter_order : int
        Lattice order ``M`` (number of sections); the ladder has ``M+1``
        coefficients.
    lambda_factor : float, optional
        Forgetting factor of the exponentially weighted recursions.
        Default is 0.99.
    epsilon : float, optional
        Initialization/regularization constant for the forward/backward
        energy variables. Default is 0.1.
    w_init : array_like of complex, optional
        Initial ladder coefficients, length ``M+1``. If None, zeros.
    denom_floor : float, optional
        Small positive floor avoiding division by (near) zero in
        normalization terms. Default is 1e-12.
    xi_floor : float, optional
        Floor keeping the energy variables positive; defaults to ``epsilon``.

    Notes
    -----
    The ladder stage starts with ``e_post = d[k]`` and repeatedly subtracts
    ``conj(v[m]) * b_m(k)``, where ``b_m(k)`` are the backward-error
    components. The final ``e_post`` is returned in ``errors[k]`` and the
    output estimate is ``outputs[k] = d[k] - e_post``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, Algorithm 7.1.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
        denom_floor: float = 1e-12,
        xi_floor: Optional[float] = None,
    ) -> None:
        """
        Initialize lattice/ladder state; see the class docstring for the
        meaning of each parameter.

        Raises
        ------
        ValueError
            If ``w_init`` does not have length ``M + 1``.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)

        self._tiny = float(denom_floor)
        self._xi_floor = float(xi_floor) if xi_floor is not None else float(self.epsilon)

        # Lattice state: delta has shape (M,); the energies and the previous
        # backward-error vector have shape (M+1,).
        self.delta = np.zeros(self.n_sections, dtype=complex)
        self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
        self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
        self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex)

        if w_init is None:
            self.v = np.zeros(self.n_sections + 1, dtype=complex)
        else:
            ladder0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if ladder0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {ladder0.size}"
                )
            self.v = ladder0

        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)

        # Mirror the ladder into the base-class coefficient slot and reset
        # the history so it starts at the initial ladder state.
        self.w = self.v.copy()
        self.w_history = []
        self._record_history()

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run LRLS adaptation (a posteriori form) over ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, ``result.extra`` carries the *final* internal states
            ``xi_f``, ``xi_b``, ``delta`` and ``delta_v`` (not trajectories).

        Returns
        -------
        OptimizationResult
            ``errors[k]`` is the final a posteriori ladder error,
            ``outputs[k] = d[k] - errors[k]``, ``coefficients`` mirrors the
            ladder history via ``self.w``, and ``error_type`` is
            ``"a_posteriori"``.
        """
        t0 = perf_counter()

        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        for k in range(n_samples):
            fwd_err = complex(x_in[k])

            bwd_err = np.zeros(self.n_sections + 1, dtype=complex)
            bwd_err[0] = x_in[k]

            # Order-0 energies are driven directly by |x[k]|^2.
            self.xi_f[0] = max(
                self.lam * self.xi_f[0] + float(np.real(fwd_err * np.conj(fwd_err))),
                self._xi_floor,
            )
            self.xi_b[0] = self.xi_f[0]

            gamma_conv = 1.0

            # --- Lattice prediction stage --------------------------------
            for m in range(self.n_sections):
                gamma_safe = max(gamma_conv, self._tiny)

                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(fwd_err)) / gamma_safe
                )

                # Forward/backward reflection coefficients.
                refl_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
                refl_b = self.delta[m] / (self.xi_f[m] + self._tiny)

                next_fwd = fwd_err - refl_f * self.error_b_prev[m]
                bwd_err[m + 1] = self.error_b_prev[m] - refl_b * fwd_err

                self.xi_f[m + 1] = max(
                    self.lam * self.xi_f[m + 1]
                    + float(np.real(next_fwd * np.conj(next_fwd))) / gamma_safe,
                    self._xi_floor,
                )
                self.xi_b[m + 1] = max(
                    self.lam * self.xi_b[m + 1]
                    + float(np.real(bwd_err[m + 1] * np.conj(bwd_err[m + 1]))) / gamma_safe,
                    self._xi_floor,
                )

                # Conversion-factor recursion, floored to stay positive.
                gamma_conv = max(
                    gamma_conv
                    - float(np.real(bwd_err[m] * np.conj(bwd_err[m]))) / (self.xi_b[m] + self._tiny),
                    self._tiny,
                )
                fwd_err = next_fwd

            # --- Ladder (joint-process) stage ----------------------------
            e_post = complex(d_in[k])
            gamma_lad = 1.0

            for m in range(self.n_sections + 1):
                gamma_safe = max(gamma_lad, self._tiny)

                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (bwd_err[m] * np.conj(e_post)) / gamma_safe
                )
                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)

                e_post = e_post - np.conj(self.v[m]) * bwd_err[m]

                gamma_lad = max(
                    gamma_lad
                    - float(np.real(bwd_err[m] * np.conj(bwd_err[m]))) / (self.xi_b[m] + self._tiny),
                    self._tiny,
                )

            outputs[k] = d_in[k] - e_post
            errors[k] = e_post

            self.error_b_prev = bwd_err.copy()

        self.w = self.v.copy()
        self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[LRLSPosteriori] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Lattice RLS using a posteriori errors (LRLS, a posteriori form), complex-valued.
Implements Diniz (Algorithm 7.1) in a lattice/ladder structure:
1) Lattice prediction stage (order M):
Updates forward/backward a posteriori prediction errors and energy terms
using exponentially weighted recursions.
2) Ladder (joint-process) stage (length M+1):
Updates the ladder coefficients v and produces the a posteriori
output error by progressively "whitening" the desired sample through the
backward-error vector.
Library conventions
- Complex-valued implementation (supports_complex=True).
- Ladder coefficients are stored in self.v with length M+1.
- For compatibility with pydaptivefiltering.base.AdaptiveFilter, self.w mirrors
  self.v at each iteration, and the base-class history corresponds to the ladder
  coefficient trajectory.
Parameters
filter_order : int
Lattice order M (number of sections). The ladder has M+1 coefficients.
lambda_factor : float, optional
Forgetting factor lambda used in the exponentially weighted recursions.
Default is 0.99.
epsilon : float, optional
Initialization/regularization constant for the energy variables
(forward/backward). Default is 0.1.
w_init : array_like of complex, optional
Optional initial ladder coefficients of length M+1. If None, initializes
with zeros.
denom_floor : float, optional
Small positive floor used to avoid division by (near) zero in normalization
terms (gamma variables and energy denominators). Default is 1e-12.
xi_floor : float, optional
Floor applied to energy variables to keep them positive. If None, defaults
to epsilon.
Notes
Signals and dimensions
~~~~~~
For lattice order M:
- delta has shape (M,) (lattice delta state)
- xi_f and xi_b have shape (M+1,) (forward/backward energies)
- error_b_prev and the per-sample curr_err_b have shape (M+1,) (backward-error vectors)
- v and delta_v have shape (M+1,) (ladder coefficients and state)
A posteriori error (as returned)
~~~~~~~~~
The ladder stage starts with e_post = d[k] and updates it as:
$$e_{post}(k) \leftarrow e_{post}(k) - v_m^*(k)\, b_m(k),$$
where \( b_m(k) \) are the components of the backward-error vector.
The final e_post is the a posteriori error returned in errors[k],
while the output estimate is returned as outputs[k] = d[k] - e_post.
References
99 def __init__( 100 self, 101 filter_order: int, 102 lambda_factor: float = 0.99, 103 epsilon: float = 0.1, 104 w_init: Optional[Union[np.ndarray, list]] = None, 105 denom_floor: float = 1e-12, 106 xi_floor: Optional[float] = None, 107 ) -> None: 108 """ 109 Parameters 110 ---------- 111 filter_order: 112 Number of lattice sections M. Ladder has M+1 coefficients. 113 lambda_factor: 114 Forgetting factor λ. 115 epsilon: 116 Energy initialization / regularization. 117 w_init: 118 Optional initial ladder coefficient vector (length M+1). If None, zeros. 119 denom_floor: 120 Floor used to avoid division by (near) zero in normalization terms. 121 xi_floor: 122 Floor used to keep energies positive (defaults to epsilon). 123 """ 124 super().__init__(filter_order=filter_order, w_init=w_init) 125 126 self.lam = float(lambda_factor) 127 self.epsilon = float(epsilon) 128 self.n_sections = int(filter_order) 129 130 self._tiny = float(denom_floor) 131 self._xi_floor = float(xi_floor) if xi_floor is not None else float(self.epsilon) 132 133 self.delta = np.zeros(self.n_sections, dtype=complex) 134 self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon 135 self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon 136 self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex) 137 138 if w_init is not None: 139 v0 = np.asarray(w_init, dtype=complex).reshape(-1) 140 if v0.size != self.n_sections + 1: 141 raise ValueError( 142 f"w_init must have length {self.n_sections + 1}, got {v0.size}" 143 ) 144 self.v = v0 145 else: 146 self.v = np.zeros(self.n_sections + 1, dtype=complex) 147 148 self.delta_v = np.zeros(self.n_sections + 1, dtype=complex) 149 150 self.w = self.v.copy() 151 self.w_history = [] 152 self._record_history()
Parameters
filter_order: Number of lattice sections M. Ladder has M+1 coefficients.
lambda_factor: Forgetting factor λ.
epsilon: Energy initialization / regularization.
w_init: Optional initial ladder coefficient vector (length M+1). If None, zeros.
denom_floor: Floor used to avoid division by (near) zero in normalization terms.
xi_floor: Floor used to keep energies positive (defaults to epsilon).
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS adaptation (a posteriori form) over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, returns selected *final* internal states in ``result.extra``
            (not full trajectories).

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)``
                Estimated output sequence. In this implementation:
                ``outputs[k] = d[k] - e_post[k]``.
            - errors : ndarray of complex, shape ``(N,)``
                A posteriori error produced by the ladder stage (final ``e_post``).
            - coefficients : ndarray
                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
            - error_type : str
                Set to ``"a_posteriori"``.
            - extra : dict, optional
                Present only if ``return_internal_states=True`` (see below).

        Extra (when return_internal_states=True)
        --------------------------------------
        xi_f : ndarray of float, shape ``(M+1,)``
            Final forward energies.
        xi_b : ndarray of float, shape ``(M+1,)``
            Final backward energies.
        delta : ndarray of complex, shape ``(M,)``
            Final lattice delta state.
        delta_v : ndarray of complex, shape ``(M+1,)``
            Final ladder delta state used to compute ``v``.
        """
        t0 = perf_counter()

        # Force complex dtype and flatten to 1D (class declares supports_complex=True).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        for k in range(n_samples):
            # Section 0 is seeded with the raw input sample: forward error and
            # backward-error vector both start from x[k].
            err_f = complex(x_in[k])

            curr_err_b = np.zeros(self.n_sections + 1, dtype=complex)
            curr_err_b[0] = x_in[k]

            # Zeroth-order energies share one exponentially weighted recursion,
            # clamped from below by the configured energy floor.
            energy_x = float(np.real(err_f * np.conj(err_f)))
            self.xi_f[0] = max(self.lam * self.xi_f[0] + energy_x, self._xi_floor)
            self.xi_b[0] = self.xi_f[0]

            # Lattice likelihood (conversion) factor, decremented per section.
            gamma_m = 1.0

            # --- Lattice prediction stage (order updates over the M sections) ---
            for m in range(self.n_sections):
                denom_g = max(gamma_m, self._tiny)

                # Time-update of the forward/backward cross term, normalized by gamma.
                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
                )

                # Reflection coefficients derived from delta and the energies.
                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)

                # Order-update of forward and backward a posteriori errors.
                new_err_f = err_f - kappa_f * self.error_b_prev[m]
                curr_err_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f

                # Energy recursions for the next section, with the same floor.
                self.xi_f[m + 1] = max(
                    self.lam * self.xi_f[m + 1]
                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g,
                    self._xi_floor,
                )
                self.xi_b[m + 1] = max(
                    self.lam * self.xi_b[m + 1]
                    + float(np.real(curr_err_b[m + 1] * np.conj(curr_err_b[m + 1]))) / denom_g,
                    self._xi_floor,
                )

                # gamma recursion: subtract the normalized backward-error energy,
                # keeping the factor strictly positive.
                denom_xib = self.xi_b[m] + self._tiny
                energy_b_curr = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m])))
                gamma_m_next = gamma_m - (energy_b_curr / denom_xib)

                gamma_m = max(gamma_m_next, self._tiny)
                err_f = new_err_f

            # --- Ladder (joint-process) stage: whitens d[k] against the
            # backward-error vector to produce the a posteriori error ---
            e_post = complex(d_in[k])
            gamma_ladder = 1.0

            for m in range(self.n_sections + 1):
                denom_gl = max(gamma_ladder, self._tiny)

                # Ladder delta correlates the backward error with the running residual.
                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (curr_err_b[m] * np.conj(e_post)) / denom_gl
                )

                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)

                # Remove the component of the residual explained by section m.
                e_post = e_post - np.conj(self.v[m]) * curr_err_b[m]

                denom_xib_m = self.xi_b[m] + self._tiny
                energy_b_l = float(np.real(curr_err_b[m] * np.conj(curr_err_b[m])))
                gamma_ladder_next = gamma_ladder - (energy_b_l / denom_xib_m)
                gamma_ladder = max(gamma_ladder_next, self._tiny)

            outputs[k] = d_in[k] - e_post
            errors[k] = e_post

            # Carry the backward-error vector to the next time step.
            self.error_b_prev = curr_err_b.copy()

            # Mirror ladder coefficients into the base-class weight history.
            self.w = self.v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[LRLSPosteriori] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Executes LRLS adaptation (a posteriori form) over paired sequences x[k] and d[k].
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,).
desired_signal : array_like of complex
Desired/reference sequence d[k] with shape (N,).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, returns selected final internal states in result.extra
(not full trajectories).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Estimated output sequence. In this implementation:
outputs[k] = d[k] - e_post[k].
- errors : ndarray of complex, shape (N,)
A posteriori error produced by the ladder stage (final e_post).
- coefficients : ndarray
Ladder coefficient history (mirrors self.v via self.w).
- error_type : str
Set to "a_posteriori".
- extra : dict, optional
Present only if return_internal_states=True (see below).
Extra (when return_internal_states=True)
xi_f : ndarray of float, shape (M+1,)
Final forward energies.
xi_b : ndarray of float, shape (M+1,)
Final backward energies.
delta : ndarray of complex, shape (M,)
Final lattice delta state.
delta_v : ndarray of complex, shape (M+1,)
Final ladder delta state used to compute v.
class LRLSErrorFeedback(AdaptiveFilter):
    """
    Lattice RLS with a posteriori errors and Error Feedback (LRLS-EF), complex-valued.

    Implements the lattice/ladder RLS structure with error feedback described in
    Diniz (Algorithm 7.5). The method decomposes the adaptation into:

    1) **Lattice prediction stage**:
       Updates forward/backward a posteriori prediction errors and associated
       reflection-like variables via exponentially weighted energies.

    2) **Ladder (joint-process) stage**:
       Estimates the ladder coefficients that map the lattice backward-error
       vector into the desired response.

    In this implementation, the ladder coefficient vector is stored in ``self.v``
    (length ``M+1``). For compatibility with :class:`~pydaptivefiltering.base.AdaptiveFilter`,
    ``self.w`` mirrors ``self.v`` at each iteration and the coefficient history
    recorded by the base class corresponds to the ladder coefficients.

    Parameters
    ----------
    filter_order : int
        Lattice order ``M`` (number of sections). The ladder has ``M+1`` coefficients.
    lambda_factor : float, optional
        Forgetting factor ``lambda`` used in the exponentially weighted recursions.
        Default is 0.99.
    epsilon : float, optional
        Positive initialization/regularization constant for forward and backward
        energies. Default is 0.1.
    w_init : ComplexArrayLike, optional
        Optional initial ladder coefficients of length ``M+1``. If None, initializes
        with zeros.
    safe_eps : float, optional
        Small positive floor used to avoid division by (near) zero and to keep the
        internal likelihood variables bounded. Default is 1e-12.

    Notes
    -----
    Signals and dimensions
    ~~~~~~~~~~~~~~~~~~~~~~
    This class operates on complex-valued sequences. For lattice order ``M``:

    - ``delta`` and ``delta_v`` have shape ``(M+1,)``
    - ``xi_f`` and ``xi_b`` have shape ``(M+2,)`` (energies per section plus guard)
    - ``error_b_prev`` has shape ``(M+2,)`` and stores the previous backward-error
      vector used for the error-feedback recursion.
    - At each time k, the ladder regressor is the backward-error vector
      ``curr_b[:M+1]``.

    Output computation
    ~~~~~~~~~~~~~~~~~~
    The estimated output is formed as a ladder combination:

    .. math::
        y(k) = \\mathbf{v}^H(k)\\, \\mathbf{b}(k),

    where :math:`\\mathbf{b}(k)` corresponds to ``curr_b[:M+1]`` and
    :math:`\\mathbf{v}(k)` is the ladder coefficient vector ``self.v``.
    The reported error is the output error :math:`e(k)=d(k)-y(k)`.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
           Implementation*, Algorithm 7.5.
    """

    supports_complex: bool = True

    # Scalar configuration set in __init__.
    lam: float
    epsilon: float
    n_sections: int
    safe_eps: float

    # Lattice state arrays (see "Signals and dimensions" above).
    delta: ArrayLike
    xi_f: ArrayLike
    xi_b: ArrayLike
    error_b_prev: ArrayLike

    # Ladder coefficients and their delta state.
    v: ArrayLike
    delta_v: ArrayLike

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[ComplexArrayLike] = None,
        safe_eps: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            Lattice order M (number of sections). The ladder has M+1 coefficients.
        lambda_factor:
            Forgetting factor λ.
        epsilon:
            Regularization/initialization constant for energies.
        w_init:
            Optional initial ladder coefficients (length M+1). If None, zeros.
        safe_eps:
            Small positive floor used to avoid division by (near) zero.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)
        self.safe_eps = float(safe_eps)

        self.delta = np.zeros(self.n_sections + 1, dtype=complex)

        # Energies carry one extra "guard" slot beyond the M+1 sections.
        self.xi_f = np.ones(self.n_sections + 2, dtype=float) * self.epsilon
        self.xi_b = np.ones(self.n_sections + 2, dtype=float) * self.epsilon

        self.error_b_prev = np.zeros(self.n_sections + 2, dtype=complex)

        if w_init is not None:
            v0 = np.asarray(w_init, dtype=complex).ravel()
            if v0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
                )
            self.v = v0
        else:
            self.v = np.zeros(self.n_sections + 1, dtype=complex)

        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)

        # Mirror ladder coefficients into the base-class weight vector/history.
        self.w = self.v.copy()
        self.w_history = []
        self._record_history()

    @validate_input
    def optimize(
        self,
        input_signal: ArrayLike,
        desired_signal: ArrayLike,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS-EF adaptation for paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, returns selected *final* internal states in ``result.extra``
            (not full trajectories).

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)``
                Estimated output sequence ``y[k]``.
            - errors : ndarray of complex, shape ``(N,)``
                Output error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray
                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
            - error_type : str
                Set to ``"output_error"``.
            - extra : dict, optional
                Present only if ``return_internal_states=True`` (see below).

        Extra (when return_internal_states=True)
        --------------------------------------
        xi_f : ndarray of float, shape ``(M+2,)``
            Final forward prediction-error energies.
        xi_b : ndarray of float, shape ``(M+2,)``
            Final backward prediction-error energies.
        delta : ndarray of complex, shape ``(M+1,)``
            Final lattice delta (reflection-like) state.
        delta_v : ndarray of complex, shape ``(M+1,)``
            Final ladder delta state used to compute ``v``.
        """
        tic: float = time()

        # Force complex dtype and flatten to 1D (class declares supports_complex=True).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        eps = self.safe_eps

        for k in range(n_samples):
            # Section 0 seeded with the raw input sample.
            err_f = complex(x_in[k])

            curr_b = np.zeros(self.n_sections + 2, dtype=complex)
            curr_b[0] = x_in[k]

            # NOTE(review): unlike the a posteriori variant, this energy update
            # has no lower floor (only eps in the later denominators) — confirm
            # this matches the intended Algorithm 7.5 conditioning.
            energy_x = float(np.real(x_in[k] * np.conj(x_in[k])))
            self.xi_f[0] = self.lam * self.xi_f[0] + energy_x
            self.xi_b[0] = self.xi_f[0]

            # Lattice likelihood (conversion) factor, decremented per section.
            g = 1.0

            # --- Lattice prediction stage (M+1 order updates) ---
            for m in range(self.n_sections + 1):
                denom_g = max(g, eps)

                # Time-update of the forward/backward cross term, normalized by g.
                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(err_f)) / denom_g
                )

                # Reflection coefficients derived from delta and the energies.
                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + eps)
                kappa_b = self.delta[m] / (self.xi_f[m] + eps)

                # Order-update of forward and backward errors.
                new_err_f = err_f - kappa_f * self.error_b_prev[m]
                curr_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f

                self.xi_f[m + 1] = (
                    self.lam * self.xi_f[m + 1]
                    + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g
                )
                self.xi_b[m + 1] = (
                    self.lam * self.xi_b[m + 1]
                    + float(np.real(curr_b[m + 1] * np.conj(curr_b[m + 1]))) / denom_g
                )

                # g recursion: subtract normalized backward-error energy, kept positive.
                energy_b_curr = float(np.real(curr_b[m] * np.conj(curr_b[m])))
                g = g - (energy_b_curr / (self.xi_b[m] + eps))
                g = max(g, eps)

                err_f = new_err_f

            # Output as ladder combination y(k) = v^H b(k); error is d(k) - y(k).
            y_k = complex(np.vdot(self.v, curr_b[: self.n_sections + 1]))
            outputs[k] = y_k
            e_k = complex(d_in[k] - y_k)
            errors[k] = e_k

            # --- Ladder (joint-process) stage ---
            g_ladder = 1.0
            for m in range(self.n_sections + 1):
                denom_gl = max(g_ladder, eps)

                # NOTE(review): the ladder delta correlates with d[k] directly
                # (not a running residual, as in the a posteriori variant) —
                # confirm against Algorithm 7.5.
                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (curr_b[m] * np.conj(d_in[k])) / denom_gl
                )

                self.v[m] = self.delta_v[m] / (self.xi_b[m] + eps)

                energy_b = float(np.real(curr_b[m] * np.conj(curr_b[m])))
                g_ladder = g_ladder - (energy_b / (self.xi_b[m] + eps))
                g_ladder = max(g_ladder, eps)

            # curr_b is freshly allocated each iteration, so no copy is needed.
            self.error_b_prev = curr_b

            # Mirror ladder coefficients into the base-class weight history.
            self.w = self.v.copy()
            self._record_history()

        runtime_s = float(time() - tic)
        if verbose:
            print(f"[LRLSErrorFeedback] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Lattice RLS with a posteriori errors and Error Feedback (LRLS-EF), complex-valued.
Implements the lattice/ladder RLS structure with error feedback described in Diniz (Algorithm 7.5). The method decomposes the adaptation into:
1) Lattice prediction stage: Updates forward/backward a posteriori prediction errors and associated reflection-like variables via exponentially weighted energies.
2) Ladder (joint-process) stage: Estimates the ladder coefficients that map the lattice backward-error vector into the desired response.
In this implementation, the ladder coefficient vector is stored in self.v
(length M+1). For compatibility with ~pydaptivefiltering.base.AdaptiveFilter,
self.w mirrors self.v at each iteration and the coefficient history
recorded by the base class corresponds to the ladder coefficients.
Parameters
filter_order : int
Lattice order M (number of sections). The ladder has M+1 coefficients.
lambda_factor : float, optional
Forgetting factor lambda used in the exponentially weighted recursions.
Default is 0.99.
epsilon : float, optional
Positive initialization/regularization constant for forward and backward
energies. Default is 0.1.
w_init : ComplexArrayLike, optional
Optional initial ladder coefficients of length M+1. If None, initializes
with zeros.
safe_eps : float, optional
Small positive floor used to avoid division by (near) zero and to keep the
internal likelihood variables bounded. Default is 1e-12.
Notes
Signals and dimensions
~~~~~~
This class operates on complex-valued sequences. For lattice order M:
- delta and delta_v have shape (M+1,)
- xi_f and xi_b have shape (M+2,) (energies per section plus guard)
- error_b_prev has shape (M+2,) and stores the previous backward-error vector
  used for the error-feedback recursion.
- At each time k, the ladder regressor is the backward-error vector curr_b[:M+1].
Output computation
~~~~~~
The estimated output is formed as a ladder combination:
$$y(k) = \mathbf{v}^H(k)\, \mathbf{b}(k),$$
where \( \mathbf{b}(k) \) corresponds to curr_b[:M+1] and
\( \mathbf{v}(k) \) is the ladder coefficient vector self.v.
The reported error is the output error \( e(k)=d(k)-y(k) \).
References
108 def __init__( 109 self, 110 filter_order: int, 111 lambda_factor: float = 0.99, 112 epsilon: float = 0.1, 113 w_init: Optional[ComplexArrayLike] = None, 114 safe_eps: float = 1e-12, 115 ) -> None: 116 """ 117 Parameters 118 ---------- 119 filter_order: 120 Lattice order M (number of sections). The ladder has M+1 coefficients. 121 lambda_factor: 122 Forgetting factor λ. 123 epsilon: 124 Regularization/initialization constant for energies. 125 w_init: 126 Optional initial ladder coefficients (length M+1). If None, zeros. 127 safe_eps: 128 Small positive floor used to avoid division by (near) zero. 129 """ 130 super().__init__(filter_order=filter_order, w_init=w_init) 131 132 self.lam = float(lambda_factor) 133 self.epsilon = float(epsilon) 134 self.n_sections = int(filter_order) 135 self.safe_eps = float(safe_eps) 136 137 self.delta = np.zeros(self.n_sections + 1, dtype=complex) 138 139 self.xi_f = np.ones(self.n_sections + 2, dtype=float) * self.epsilon 140 self.xi_b = np.ones(self.n_sections + 2, dtype=float) * self.epsilon 141 142 self.error_b_prev = np.zeros(self.n_sections + 2, dtype=complex) 143 144 if w_init is not None: 145 v0 = np.asarray(w_init, dtype=complex).ravel() 146 if v0.size != self.n_sections + 1: 147 raise ValueError( 148 f"w_init must have length {self.n_sections + 1}, got {v0.size}" 149 ) 150 self.v = v0 151 else: 152 self.v = np.zeros(self.n_sections + 1, dtype=complex) 153 154 self.delta_v = np.zeros(self.n_sections + 1, dtype=complex) 155 156 self.w = self.v.copy() 157 self.w_history = [] 158 self._record_history()
Parameters
filter_order: Lattice order M (number of sections). The ladder has M+1 coefficients.
lambda_factor: Forgetting factor λ.
epsilon: Regularization/initialization constant for energies.
w_init: Optional initial ladder coefficients (length M+1). If None, zeros.
safe_eps: Small positive floor used to avoid division by (near) zero.
160 @validate_input 161 def optimize( 162 self, 163 input_signal: ArrayLike, 164 desired_signal: ArrayLike, 165 verbose: bool = False, 166 return_internal_states: bool = False, 167 ) -> OptimizationResult: 168 """ 169 Executes LRLS-EF adaptation for paired sequences ``x[k]`` and ``d[k]``. 170 171 Parameters 172 ---------- 173 input_signal : array_like of complex 174 Input sequence ``x[k]`` with shape ``(N,)``. 175 desired_signal : array_like of complex 176 Desired/reference sequence ``d[k]`` with shape ``(N,)``. 177 verbose : bool, optional 178 If True, prints the total runtime after completion. 179 return_internal_states : bool, optional 180 If True, returns selected *final* internal states in ``result.extra`` 181 (not full trajectories). 182 183 Returns 184 ------- 185 OptimizationResult 186 Result object with fields: 187 - outputs : ndarray of complex, shape ``(N,)`` 188 Estimated output sequence ``y[k]``. 189 - errors : ndarray of complex, shape ``(N,)`` 190 Output error sequence ``e[k] = d[k] - y[k]``. 191 - coefficients : ndarray 192 Ladder coefficient history (mirrors ``self.v`` via ``self.w``). 193 - error_type : str 194 Set to ``"output_error"``. 195 - extra : dict, optional 196 Present only if ``return_internal_states=True`` (see below). 197 198 Extra (when return_internal_states=True) 199 -------------------------------------- 200 xi_f : ndarray of float, shape ``(M+2,)`` 201 Final forward prediction-error energies. 202 xi_b : ndarray of float, shape ``(M+2,)`` 203 Final backward prediction-error energies. 204 delta : ndarray of complex, shape ``(M+1,)`` 205 Final lattice delta (reflection-like) state. 206 delta_v : ndarray of complex, shape ``(M+1,)`` 207 Final ladder delta state used to compute ``v``. 
208 """ 209 tic: float = time() 210 211 x_in = np.asarray(input_signal, dtype=complex).ravel() 212 d_in = np.asarray(desired_signal, dtype=complex).ravel() 213 214 n_samples = int(d_in.size) 215 outputs = np.zeros(n_samples, dtype=complex) 216 errors = np.zeros(n_samples, dtype=complex) 217 218 eps = self.safe_eps 219 220 for k in range(n_samples): 221 err_f = complex(x_in[k]) 222 223 curr_b = np.zeros(self.n_sections + 2, dtype=complex) 224 curr_b[0] = x_in[k] 225 226 energy_x = float(np.real(x_in[k] * np.conj(x_in[k]))) 227 self.xi_f[0] = self.lam * self.xi_f[0] + energy_x 228 self.xi_b[0] = self.xi_f[0] 229 230 g = 1.0 231 232 for m in range(self.n_sections + 1): 233 denom_g = max(g, eps) 234 235 self.delta[m] = ( 236 self.lam * self.delta[m] 237 + (self.error_b_prev[m] * np.conj(err_f)) / denom_g 238 ) 239 240 kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + eps) 241 kappa_b = self.delta[m] / (self.xi_f[m] + eps) 242 243 new_err_f = err_f - kappa_f * self.error_b_prev[m] 244 curr_b[m + 1] = self.error_b_prev[m] - kappa_b * err_f 245 246 self.xi_f[m + 1] = ( 247 self.lam * self.xi_f[m + 1] 248 + float(np.real(new_err_f * np.conj(new_err_f))) / denom_g 249 ) 250 self.xi_b[m + 1] = ( 251 self.lam * self.xi_b[m + 1] 252 + float(np.real(curr_b[m + 1] * np.conj(curr_b[m + 1]))) / denom_g 253 ) 254 255 energy_b_curr = float(np.real(curr_b[m] * np.conj(curr_b[m]))) 256 g = g - (energy_b_curr / (self.xi_b[m] + eps)) 257 g = max(g, eps) 258 259 err_f = new_err_f 260 261 y_k = complex(np.vdot(self.v, curr_b[: self.n_sections + 1])) 262 outputs[k] = y_k 263 e_k = complex(d_in[k] - y_k) 264 errors[k] = e_k 265 266 g_ladder = 1.0 267 for m in range(self.n_sections + 1): 268 denom_gl = max(g_ladder, eps) 269 270 self.delta_v[m] = ( 271 self.lam * self.delta_v[m] 272 + (curr_b[m] * np.conj(d_in[k])) / denom_gl 273 ) 274 275 self.v[m] = self.delta_v[m] / (self.xi_b[m] + eps) 276 277 energy_b = float(np.real(curr_b[m] * np.conj(curr_b[m]))) 278 g_ladder = g_ladder - (energy_b 
/ (self.xi_b[m] + eps)) 279 g_ladder = max(g_ladder, eps) 280 281 self.error_b_prev = curr_b 282 283 self.w = self.v.copy() 284 self._record_history() 285 286 runtime_s = float(time() - tic) 287 if verbose: 288 print(f"[LRLSErrorFeedback] Completed in {runtime_s * 1000:.02f} ms") 289 290 extra: Optional[Dict[str, Any]] = None 291 if return_internal_states: 292 extra = { 293 "xi_f": self.xi_f.copy(), 294 "xi_b": self.xi_b.copy(), 295 "delta": self.delta.copy(), 296 "delta_v": self.delta_v.copy(), 297 } 298 299 return self._pack_results( 300 outputs=outputs, 301 errors=errors, 302 runtime_s=runtime_s, 303 error_type="output_error", 304 extra=extra, 305 )
Executes LRLS-EF adaptation for paired sequences x[k] and d[k].
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,).
desired_signal : array_like of complex
Desired/reference sequence d[k] with shape (N,).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, returns selected final internal states in result.extra
(not full trajectories).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Estimated output sequence y[k].
- errors : ndarray of complex, shape (N,)
Output error sequence e[k] = d[k] - y[k].
- coefficients : ndarray
Ladder coefficient history (mirrors self.v via self.w).
- error_type : str
Set to "output_error".
- extra : dict, optional
Present only if return_internal_states=True (see below).
Extra (when return_internal_states=True)
xi_f : ndarray of float, shape (M+2,)
Final forward prediction-error energies.
xi_b : ndarray of float, shape (M+2,)
Final backward prediction-error energies.
delta : ndarray of complex, shape (M+1,)
Final lattice delta (reflection-like) state.
delta_v : ndarray of complex, shape (M+1,)
Final ladder delta state used to compute v.
class LRLSPriori(AdaptiveFilter):
    """
    Lattice RLS using a priori errors (LRLS, a priori form), complex-valued.

    Implements Diniz (Algorithm 7.4) in a lattice/ladder structure:

    1) **Lattice prediction stage** (order ``M``):
       Produces forward a priori errors and a vector of backward errors, updating
       reflection-like state variables and exponentially weighted energies.

    2) **Ladder (joint-process) stage** (length ``M+1``):
       Updates the ladder coefficients ``v`` using the a priori backward-error
       vector and produces an **a priori** error associated with the desired signal.

    Library conventions
    -------------------
    - Complex-valued implementation (``supports_complex=True``).
    - Ladder coefficients are stored in ``self.v`` with length ``M+1``.
    - For compatibility with :class:`~pydaptivefiltering.base.AdaptiveFilter`,
      ``self.w`` mirrors ``self.v`` at each iteration and the base-class history
      corresponds to the ladder coefficient trajectory.

    Parameters
    ----------
    filter_order : int
        Lattice order ``M`` (number of sections). The ladder has ``M+1`` coefficients.
    lambda_factor : float, optional
        Forgetting factor ``lambda`` used in the exponentially weighted recursions.
        Default is 0.99.
    epsilon : float, optional
        Initialization/regularization constant for the energy variables
        (forward/backward). Default is 0.1.
    w_init : array_like of complex, optional
        Optional initial ladder coefficients of length ``M+1``. If None, initializes
        with zeros.
    denom_floor : float, optional
        Small positive floor used to avoid division by (near) zero in normalization
        terms (``gamma`` variables and energy denominators). Default is 1e-12.

    Notes
    -----
    Signals and dimensions
    ~~~~~~~~~~~~~~~~~~~~~~
    For lattice order ``M``:

    - ``delta`` has shape ``(M,)`` (lattice delta state)
    - ``xi_f`` and ``xi_b`` have shape ``(M+1,)`` (forward/backward energies)
    - ``error_b_prev`` and per-sample ``alpha_b`` have shape ``(M+1,)``
      (backward-error vectors)
    - ``v`` and ``delta_v`` have shape ``(M+1,)`` (ladder coefficients and state)

    A priori error (as returned)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The ladder stage starts with ``alpha_e = d[k]`` and removes components
    correlated with the backward-error vector:

    .. math::
        \\alpha_e \\leftarrow \\alpha_e - v_m^*(k)\\, b_m(k),

    where :math:`b_m(k)` are the backward errors (``alpha_b[m]``). The final
    value is then scaled by the final lattice normalization factor ``gamma``:

    .. math::
        e_{pri}(k) = \\gamma(k)\\, \\alpha_e(k).

    This scaled error is returned in ``errors[k]``, and the output estimate is
    returned as ``outputs[k] = d[k] - e_pri[k]``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, Algorithm 7.4.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
        denom_floor: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            Number of lattice sections M. Ladder has M+1 coefficients.
        lambda_factor:
            Forgetting factor lambda.
        epsilon:
            Energy initialization / regularization.
        w_init:
            Optional initial ladder coefficient vector (length M+1). If None, zeros.
        denom_floor:
            Floor used to avoid division by (near) zero in normalization terms.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)
        self._tiny = float(denom_floor)

        # Lattice state: delta (M,), exponentially weighted forward/backward
        # energies (M+1,) seeded with epsilon, and previous backward errors.
        self.delta = np.zeros(self.n_sections, dtype=complex)
        self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
        self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon
        self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex)

        # Ladder coefficients v (M+1,), optionally user-initialized.
        if w_init is not None:
            v0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if v0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {v0.size}"
                )
            self.v = v0
        else:
            self.v = np.zeros(self.n_sections + 1, dtype=complex)

        self.delta_v = np.zeros(self.n_sections + 1, dtype=complex)

        # Mirror to base API: self.w tracks self.v so base-class history works.
        self.w = self.v.copy()
        self.w_history = []
        self._record_history()

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes LRLS adaptation (a priori form) over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, returns selected *final* internal states in ``result.extra``
            (not full trajectories).

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)``
                Estimated output sequence. In this implementation:
                ``outputs[k] = d[k] - e_pri[k]``.
            - errors : ndarray of complex, shape ``(N,)``
                A priori ladder error scaled by the final lattice normalization
                factor: ``e_pri[k] = gamma[k] * alpha_e[k]``.
            - coefficients : ndarray
                Ladder coefficient history (mirrors ``self.v`` via ``self.w``).
            - error_type : str
                Set to ``"a_priori"``.
            - extra : dict, optional
                Present only if ``return_internal_states=True`` (see below).

        Extra (when return_internal_states=True)
        --------------------------------------
        xi_f : ndarray of float, shape ``(M+1,)``
            Final forward energies.
        xi_b : ndarray of float, shape ``(M+1,)``
            Final backward energies.
        delta : ndarray of complex, shape ``(M,)``
            Final lattice delta state.
        delta_v : ndarray of complex, shape ``(M+1,)``
            Final ladder delta state used to compute ``v``.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        for k in range(n_samples):
            # Order-0 forward/backward errors are the raw input sample.
            alpha_f = complex(x_in[k])

            alpha_b = np.zeros(self.n_sections + 1, dtype=complex)
            alpha_b[0] = x_in[k]

            # gamma is the lattice conversion/normalization factor, reset each
            # sample; gamma_orders keeps the per-section value for the ladder.
            gamma = 1.0
            gamma_orders = np.ones(self.n_sections + 1, dtype=float)

            # -------------------------
            # Lattice stage (a priori)
            # -------------------------
            for m in range(self.n_sections):
                gamma_orders[m] = gamma
                denom_g = max(gamma, self._tiny)

                # Exponentially weighted cross-correlation between the previous
                # backward error and the current forward error.
                self.delta[m] = (
                    self.lam * self.delta[m]
                    + (self.error_b_prev[m] * np.conj(alpha_f)) / denom_g
                )

                # Reflection-like coefficients (regularized denominators).
                kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny)
                kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny)

                # Order-update of forward/backward a priori errors.
                alpha_f_next = alpha_f - kappa_f * self.error_b_prev[m]
                alpha_b[m + 1] = self.error_b_prev[m] - kappa_b * alpha_f

                # Exponentially weighted energy updates with safe denominators
                # (uses the pre-update alpha_f; order matters here).
                self.xi_f[m] = (
                    self.lam * self.xi_f[m]
                    + float(np.real(alpha_f * np.conj(alpha_f))) / denom_g
                )
                self.xi_b[m] = (
                    self.lam * self.xi_b[m]
                    + float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_g
                )

                # gamma recursion; floored to stay strictly positive.
                denom_xib = self.xi_b[m] + self._tiny
                gamma_next = gamma - (
                    float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_xib
                )
                gamma = max(gamma_next, self._tiny)
                alpha_f = alpha_f_next

            # Final-section bookkeeping (energies at order M).
            gamma_orders[self.n_sections] = gamma
            self.xi_f[self.n_sections] = (
                self.lam * self.xi_f[self.n_sections]
                + float(np.real(alpha_f * np.conj(alpha_f))) / max(gamma, self._tiny)
            )
            self.xi_b[self.n_sections] = (
                self.lam * self.xi_b[self.n_sections]
                + float(np.real(alpha_b[self.n_sections] * np.conj(alpha_b[self.n_sections])))
                / max(gamma, self._tiny)
            )

            # -------------------------
            # Ladder stage (a priori)
            # -------------------------
            alpha_e = complex(d_in[k])

            for m in range(self.n_sections + 1):
                denom_go = max(gamma_orders[m], self._tiny)

                self.delta_v[m] = (
                    self.lam * self.delta_v[m]
                    + (alpha_b[m] * np.conj(alpha_e)) / denom_go
                )

                # Ladder coefficient and error deflation by backward error m.
                self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny)
                alpha_e = alpha_e - np.conj(self.v[m]) * alpha_b[m]

            # De-normalize: a priori error scaled by final gamma.
            e_k = alpha_e * gamma
            errors[k] = e_k
            outputs[k] = d_in[k] - e_k

            # Backward errors become "previous" for the next sample.
            self.error_b_prev = alpha_b.copy()

            # Mirror ladder coeffs into base API + record history
            self.w = self.v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[LRLSPriori] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "xi_f": self.xi_f.copy(),
                "xi_b": self.xi_b.copy(),
                "delta": self.delta.copy(),
                "delta_v": self.delta_v.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Lattice RLS using a priori errors (LRLS, a priori form), complex-valued.
Implements Diniz (Algorithm 7.4) in a lattice/ladder structure:
1) Lattice prediction stage (order M):
Produces forward a priori errors and a vector of backward errors, updating
reflection-like state variables and exponentially weighted energies.
2) Ladder (joint-process) stage (length M+1):
Updates the ladder coefficients v using the a priori backward-error
vector and produces an a priori error associated with the desired signal.
Library conventions
- Complex-valued implementation (``supports_complex=True``).
- Ladder coefficients are stored in ``self.v`` with length ``M+1``.
- For compatibility with ``pydaptivefiltering.base.AdaptiveFilter``, ``self.w`` mirrors ``self.v`` at each iteration and the base-class history corresponds to the ladder coefficient trajectory.
Parameters
filter_order : int
Lattice order M (number of sections). The ladder has M+1 coefficients.
lambda_factor : float, optional
Forgetting factor lambda used in the exponentially weighted recursions.
Default is 0.99.
epsilon : float, optional
Initialization/regularization constant for the energy variables
(forward/backward). Default is 0.1.
w_init : array_like of complex, optional
Optional initial ladder coefficients of length M+1. If None, initializes
with zeros.
denom_floor : float, optional
Small positive floor used to avoid division by (near) zero in normalization
terms (gamma variables and energy denominators). Default is 1e-12.
Notes
Signals and dimensions
~~~~~~
For lattice order M:
- ``delta`` has shape ``(M,)`` (lattice delta state)
- ``xi_f`` and ``xi_b`` have shape ``(M+1,)`` (forward/backward energies)
- ``error_b_prev`` and per-sample ``alpha_b`` have shape ``(M+1,)`` (backward-error vectors)
- ``v`` and ``delta_v`` have shape ``(M+1,)`` (ladder coefficients and state)
A priori error (as returned)
~~~~~~~~
The ladder stage starts with alpha_e = d[k] and removes components
correlated with the backward-error vector:
$$\alpha_e \leftarrow \alpha_e - v_m^*(k)\, b_m(k),$$
where \( b_m(k) \) are the backward errors (alpha_b[m]). The final
value is then scaled by the final lattice normalization factor gamma:
$$e_{pri}(k) = \gamma(k)\, \alpha_e(k).$$
This scaled error is returned in errors[k], and the output estimate is
returned as outputs[k] = d[k] - e_pri[k].
References
101 def __init__( 102 self, 103 filter_order: int, 104 lambda_factor: float = 0.99, 105 epsilon: float = 0.1, 106 w_init: Optional[Union[np.ndarray, list]] = None, 107 denom_floor: float = 1e-12, 108 ) -> None: 109 """ 110 Parameters 111 ---------- 112 filter_order: 113 Number of lattice sections M. Ladder has M+1 coefficients. 114 lambda_factor: 115 Forgetting factor λ. 116 epsilon: 117 Energy initialization / regularization. 118 w_init: 119 Optional initial ladder coefficient vector (length M+1). If None, zeros. 120 denom_floor: 121 Floor used to avoid division by (near) zero in normalization terms. 122 """ 123 super().__init__(filter_order=filter_order, w_init=w_init) 124 125 self.lam = float(lambda_factor) 126 self.epsilon = float(epsilon) 127 self.n_sections = int(filter_order) 128 self._tiny = float(denom_floor) 129 130 self.delta = np.zeros(self.n_sections, dtype=complex) 131 self.xi_f = np.ones(self.n_sections + 1, dtype=float) * self.epsilon 132 self.xi_b = np.ones(self.n_sections + 1, dtype=float) * self.epsilon 133 self.error_b_prev = np.zeros(self.n_sections + 1, dtype=complex) 134 135 if w_init is not None: 136 v0 = np.asarray(w_init, dtype=complex).reshape(-1) 137 if v0.size != self.n_sections + 1: 138 raise ValueError( 139 f"w_init must have length {self.n_sections + 1}, got {v0.size}" 140 ) 141 self.v = v0 142 else: 143 self.v = np.zeros(self.n_sections + 1, dtype=complex) 144 145 self.delta_v = np.zeros(self.n_sections + 1, dtype=complex) 146 147 # Mirror to base API 148 self.w = self.v.copy() 149 self.w_history = [] 150 self._record_history()
Parameters
filter_order: Number of lattice sections M. Ladder has M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Energy initialization / regularization. w_init: Optional initial ladder coefficient vector (length M+1). If None, zeros. denom_floor: Floor used to avoid division by (near) zero in normalization terms.
152 @validate_input 153 def optimize( 154 self, 155 input_signal: np.ndarray, 156 desired_signal: np.ndarray, 157 verbose: bool = False, 158 return_internal_states: bool = False, 159 ) -> OptimizationResult: 160 """ 161 Executes LRLS adaptation (a priori form) over paired sequences ``x[k]`` and ``d[k]``. 162 163 Parameters 164 ---------- 165 input_signal : array_like of complex 166 Input sequence ``x[k]`` with shape ``(N,)``. 167 desired_signal : array_like of complex 168 Desired/reference sequence ``d[k]`` with shape ``(N,)``. 169 verbose : bool, optional 170 If True, prints the total runtime after completion. 171 return_internal_states : bool, optional 172 If True, returns selected *final* internal states in ``result.extra`` 173 (not full trajectories). 174 175 Returns 176 ------- 177 OptimizationResult 178 Result object with fields: 179 - outputs : ndarray of complex, shape ``(N,)`` 180 Estimated output sequence. In this implementation: 181 ``outputs[k] = d[k] - e_pri[k]``. 182 - errors : ndarray of complex, shape ``(N,)`` 183 A priori ladder error scaled by the final lattice normalization 184 factor: ``e_pri[k] = gamma[k] * alpha_e[k]``. 185 - coefficients : ndarray 186 Ladder coefficient history (mirrors ``self.v`` via ``self.w``). 187 - error_type : str 188 Set to ``"a_priori"``. 189 - extra : dict, optional 190 Present only if ``return_internal_states=True`` (see below). 191 192 Extra (when return_internal_states=True) 193 -------------------------------------- 194 xi_f : ndarray of float, shape ``(M+1,)`` 195 Final forward energies. 196 xi_b : ndarray of float, shape ``(M+1,)`` 197 Final backward energies. 198 delta : ndarray of complex, shape ``(M,)`` 199 Final lattice delta state. 200 delta_v : ndarray of complex, shape ``(M+1,)`` 201 Final ladder delta state used to compute ``v``. 202 """ 203 t0 = perf_counter() 204 205 # validate_input already normalizes to 1D and matches lengths. 
206 # Force complex to respect supports_complex=True (even if x/d are real). 207 x_in = np.asarray(input_signal, dtype=complex).ravel() 208 d_in = np.asarray(desired_signal, dtype=complex).ravel() 209 210 n_samples = int(d_in.size) 211 outputs = np.zeros(n_samples, dtype=complex) 212 errors = np.zeros(n_samples, dtype=complex) 213 214 for k in range(n_samples): 215 alpha_f = complex(x_in[k]) 216 217 alpha_b = np.zeros(self.n_sections + 1, dtype=complex) 218 alpha_b[0] = x_in[k] 219 220 gamma = 1.0 221 gamma_orders = np.ones(self.n_sections + 1, dtype=float) 222 223 # ------------------------- 224 # Lattice stage (a priori) 225 # ------------------------- 226 for m in range(self.n_sections): 227 gamma_orders[m] = gamma 228 denom_g = max(gamma, self._tiny) 229 230 self.delta[m] = ( 231 self.lam * self.delta[m] 232 + (self.error_b_prev[m] * np.conj(alpha_f)) / denom_g 233 ) 234 235 kappa_f = np.conj(self.delta[m]) / (self.xi_b[m] + self._tiny) 236 kappa_b = self.delta[m] / (self.xi_f[m] + self._tiny) 237 238 alpha_f_next = alpha_f - kappa_f * self.error_b_prev[m] 239 alpha_b[m + 1] = self.error_b_prev[m] - kappa_b * alpha_f 240 241 # Energy updates (kept as in your code, with safe denominators) 242 self.xi_f[m] = ( 243 self.lam * self.xi_f[m] 244 + float(np.real(alpha_f * np.conj(alpha_f))) / denom_g 245 ) 246 self.xi_b[m] = ( 247 self.lam * self.xi_b[m] 248 + float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_g 249 ) 250 251 denom_xib = self.xi_b[m] + self._tiny 252 gamma_next = gamma - ( 253 float(np.real(alpha_b[m] * np.conj(alpha_b[m]))) / denom_xib 254 ) 255 gamma = max(gamma_next, self._tiny) 256 alpha_f = alpha_f_next 257 258 gamma_orders[self.n_sections] = gamma 259 self.xi_f[self.n_sections] = ( 260 self.lam * self.xi_f[self.n_sections] 261 + float(np.real(alpha_f * np.conj(alpha_f))) / max(gamma, self._tiny) 262 ) 263 self.xi_b[self.n_sections] = ( 264 self.lam * self.xi_b[self.n_sections] 265 + float(np.real(alpha_b[self.n_sections] * 
np.conj(alpha_b[self.n_sections]))) 266 / max(gamma, self._tiny) 267 ) 268 269 # ------------------------- 270 # Ladder stage (a priori) 271 # ------------------------- 272 alpha_e = complex(d_in[k]) 273 274 for m in range(self.n_sections + 1): 275 denom_go = max(gamma_orders[m], self._tiny) 276 277 self.delta_v[m] = ( 278 self.lam * self.delta_v[m] 279 + (alpha_b[m] * np.conj(alpha_e)) / denom_go 280 ) 281 282 self.v[m] = self.delta_v[m] / (self.xi_b[m] + self._tiny) 283 alpha_e = alpha_e - np.conj(self.v[m]) * alpha_b[m] 284 285 e_k = alpha_e * gamma 286 errors[k] = e_k 287 outputs[k] = d_in[k] - e_k 288 289 self.error_b_prev = alpha_b.copy() 290 291 # Mirror ladder coeffs into base API + record history 292 self.w = self.v.copy() 293 self._record_history() 294 295 runtime_s = float(perf_counter() - t0) 296 if verbose: 297 print(f"[LRLSPriori] Completed in {runtime_s * 1000:.02f} ms") 298 299 extra: Optional[Dict[str, Any]] = None 300 if return_internal_states: 301 extra = { 302 "xi_f": self.xi_f.copy(), 303 "xi_b": self.xi_b.copy(), 304 "delta": self.delta.copy(), 305 "delta_v": self.delta_v.copy(), 306 } 307 308 return self._pack_results( 309 outputs=outputs, 310 errors=errors, 311 runtime_s=runtime_s, 312 error_type="a_priori", 313 extra=extra, 314 )
Executes LRLS adaptation (a priori form) over paired sequences x[k] and d[k].
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,).
desired_signal : array_like of complex
Desired/reference sequence d[k] with shape (N,).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, returns selected final internal states in result.extra
(not full trajectories).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Estimated output sequence. In this implementation:
outputs[k] = d[k] - e_pri[k].
- errors : ndarray of complex, shape (N,)
A priori ladder error scaled by the final lattice normalization
factor: e_pri[k] = gamma[k] * alpha_e[k].
- coefficients : ndarray
Ladder coefficient history (mirrors self.v via self.w).
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True (see below).
Extra (when return_internal_states=True)
xi_f : ndarray of float, shape (M+1,)
Final forward energies.
xi_b : ndarray of float, shape (M+1,)
Final backward energies.
delta : ndarray of complex, shape (M,)
Final lattice delta state.
delta_v : ndarray of complex, shape (M+1,)
Final ladder delta state used to compute v.
class NormalizedLRLS(AdaptiveFilter):
    """
    Normalized Lattice RLS (NLRLS) based on a posteriori error, complex-valued.

    Implements Diniz (Algorithm 7.6). This variant introduces *normalized*
    internal variables so that key quantities (normalized forward/backward errors
    and reflection-like coefficients) are designed to be magnitude-bounded by 1,
    improving numerical robustness.

    The algorithm has two coupled stages:

    1) **Prediction stage (lattice, order M)**:
       Computes normalized forward/backward a posteriori errors (``bar_f``, ``bar_b``)
       and updates normalized reflection-like coefficients ``rho``.

    2) **Estimation stage (normalized ladder, length M+1)**:
       Updates normalized coefficients ``rho_v`` and produces a normalized
       a posteriori error ``bar_e``. The returned error is the *de-normalized*
       error ``e = bar_e * xi_half``.

    Library conventions
    -------------------
    - Complex-valued implementation (``supports_complex=True``).
    - The exposed coefficient vector is ``rho_v`` (length ``M+1``).
      For compatibility with :class:`~pydaptivefiltering.base.AdaptiveFilter`:
      * ``self.w`` mirrors ``self.rho_v`` at each iteration.
      * history recorded by ``_record_history()`` corresponds to ``rho_v``.

    Parameters
    ----------
    filter_order : int
        Lattice order ``M`` (number of sections). The estimation stage uses
        ``M+1`` coefficients.
    lambda_factor : float, optional
        Forgetting factor ``lambda`` used in the exponentially weighted updates.
        Default is 0.99.
    epsilon : float, optional
        Small positive constant used for regularization in normalizations,
        magnitude clipping, and denominator protection. Default is 1e-6.
    w_init : array_like of complex, optional
        Optional initialization for ``rho_v`` with length ``M+1``.
        If None, initializes with zeros.
    denom_floor : float, optional
        Extra floor for denominators and sqrt protections. Default is 1e-12.

    Notes
    -----
    Normalized variables
    ~~~~~~~~~~~~~~~~~~~~
    The implementation uses the following normalized quantities:

    - ``xi_half``: square-root energy tracker (scalar). It normalizes the input/output
      so that normalized errors stay bounded.
    - ``bar_f``: normalized forward error for the current section.
    - ``bar_b_prev`` / ``bar_b_curr``: normalized backward error vectors, shape ``(M+1,)``.
    - ``bar_e``: normalized a posteriori error in the estimation stage.
    - ``rho``: normalized reflection-like coefficients for the lattice stage, shape ``(M,)``.
    - ``rho_v``: normalized coefficients for the estimation stage, shape ``(M+1,)``.

    Magnitude bounding
    ~~~~~~~~~~~~~~~~~~
    Several variables are clipped to satisfy ``|z| <= 1``. The terms
    ``sqrt(1 - |z|^2)`` act like cosine factors in the normalized recursions and
    are safeguarded with ``_safe_sqrt`` to avoid negative arguments caused by
    round-off.

    Output and error returned
    ~~~~~~~~~~~~~~~~~~~~~~~~~
    The filter returns the de-normalized a posteriori error:

    ``errors[k] = bar_e[k] * xi_half[k]``

    and the output estimate:

    ``outputs[k] = d[k] - errors[k]``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, Algorithm 7.6.
    """

    supports_complex: bool = True

    def __init__(
        self,
        filter_order: int,
        lambda_factor: float = 0.99,
        epsilon: float = 1e-6,
        w_init: Optional[ArrayLike] = None,
        denom_floor: float = 1e-12,
    ) -> None:
        """
        Parameters
        ----------
        filter_order:
            Number of lattice sections M. The estimation stage uses M+1 coefficients.
        lambda_factor:
            Forgetting factor lambda.
        epsilon:
            Regularization used in normalizations and clipping.
        w_init:
            Optional initialization for rho_v (length M+1). If None, zeros.
        denom_floor:
            Extra floor for denominators / sqrt protections.
        """
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.lam = float(lambda_factor)
        self.epsilon = float(epsilon)
        self.n_sections = int(filter_order)
        self._tiny = float(denom_floor)

        # Normalized lattice reflection coefficients, shape (M,).
        self.rho = np.zeros(self.n_sections, dtype=complex)

        # Estimation-stage coefficients rho_v (M+1,), optionally user-initialized.
        if w_init is not None:
            rho_v0 = np.asarray(w_init, dtype=complex).reshape(-1)
            if rho_v0.size != self.n_sections + 1:
                raise ValueError(
                    f"w_init must have length {self.n_sections + 1}, got {rho_v0.size}"
                )
            self.rho_v = rho_v0
        else:
            self.rho_v = np.zeros(self.n_sections + 1, dtype=complex)

        self.bar_b_prev = np.zeros(self.n_sections + 1, dtype=complex)
        # sqrt-energy tracker seeded from epsilon so early divisions are safe.
        self.xi_half = float(np.sqrt(self.epsilon))

        # Mirror to base API: self.w tracks self.rho_v for history recording.
        self.w = self.rho_v.copy()
        self.w_history = []
        self._record_history()

    @staticmethod
    def _safe_sqrt(value: float) -> float:
        """
        Computes sqrt(max(value, 0.0)) to avoid negative arguments due to rounding.
        """
        return float(np.sqrt(max(0.0, float(value))))

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the Normalized LRLS (NLRLS) recursion over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, returns selected *final* internal states in ``result.extra``
            (not full trajectories).

        Returns
        -------
        OptimizationResult
            outputs : ndarray of complex, shape ``(N,)``
                Estimated output sequence ``y[k] = d[k] - e_post[k]``.
            errors : ndarray of complex, shape ``(N,)``
                De-normalized a posteriori error ``e_post[k] = bar_e[k] * xi_half[k]``.
            coefficients : ndarray
                History of ``rho_v`` (mirrors ``self.rho_v`` via ``self.w``).
            error_type : str
                Set to ``"a_posteriori"``.
            extra : dict, optional
                Present only if ``return_internal_states=True`` (see below).

        Extra (when return_internal_states=True)
        --------------------------------------
        rho : ndarray of complex, shape ``(M,)``
            Final normalized lattice reflection-like coefficients.
        rho_v : ndarray of complex, shape ``(M+1,)``
            Final normalized estimation-stage coefficients.
        xi_half : float
            Final square-root energy tracker used for normalization.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        sqrt_lam = float(np.sqrt(self.lam))

        for k in range(n_samples):
            # Update xi_half (sqrt energy): xi^2 <- lam*xi^2 + |x[k]|^2.
            xi_sq = float(self.xi_half**2)
            xi_sq = float(self.lam * xi_sq + (np.abs(x_in[k]) ** 2))
            self.xi_half = self._safe_sqrt(xi_sq)

            # Normalized order-0 forward error; clip to the unit disc.
            denom_x = float(self.xi_half + self.epsilon)
            bar_f = x_in[k] / denom_x

            abs_bf = np.abs(bar_f)
            if abs_bf > 1.0:
                bar_f = bar_f / abs_bf

            bar_b_curr = np.zeros(self.n_sections + 1, dtype=complex)
            bar_b_curr[0] = bar_f

            # -------------------------
            # Prediction stage
            # -------------------------
            for m in range(self.n_sections):
                # Cosine-like factors sqrt(1 - |z|^2), safeguarded against
                # round-off producing negative arguments.
                cos_f = self._safe_sqrt(1.0 - (np.abs(bar_f) ** 2))
                cos_b_prev = self._safe_sqrt(1.0 - (np.abs(self.bar_b_prev[m]) ** 2))

                self.rho[m] = (
                    (sqrt_lam * cos_f * cos_b_prev * self.rho[m])
                    + (np.conj(bar_f) * self.bar_b_prev[m])
                )

                # Keep |rho| strictly below 1 so cos_rho stays real/positive.
                abs_rho = np.abs(self.rho[m])
                if abs_rho >= 1.0:
                    self.rho[m] = self.rho[m] / (abs_rho + self.epsilon)

                cos_rho = self._safe_sqrt(1.0 - (np.abs(self.rho[m]) ** 2))

                denom_f = float((cos_rho * cos_b_prev) + self.epsilon)
                denom_b = float((cos_rho * cos_f) + self.epsilon)

                # Order-update of normalized forward/backward errors.
                f_next = (bar_f - self.rho[m] * self.bar_b_prev[m]) / denom_f
                b_next = (self.bar_b_prev[m] - np.conj(self.rho[m]) * bar_f) / denom_b

                bar_f = f_next
                bar_b_curr[m + 1] = b_next

            # -------------------------
            # Estimation stage
            # -------------------------
            # Normalized desired-signal error; clipped to the unit disc.
            bar_e = d_in[k] / float(self.xi_half + self.epsilon)
            abs_be = np.abs(bar_e)
            if abs_be > 1.0:
                bar_e = bar_e / abs_be

            for m in range(self.n_sections + 1):
                cos_e = self._safe_sqrt(1.0 - (np.abs(bar_e) ** 2))
                cos_b = self._safe_sqrt(1.0 - (np.abs(bar_b_curr[m]) ** 2))

                self.rho_v[m] = (
                    (sqrt_lam * cos_e * cos_b * self.rho_v[m])
                    + (np.conj(bar_e) * bar_b_curr[m])
                )

                abs_rv = np.abs(self.rho_v[m])
                if abs_rv >= 1.0:
                    self.rho_v[m] = self.rho_v[m] / (abs_rv + self.epsilon)

                cos_rho_v = self._safe_sqrt(1.0 - (np.abs(self.rho_v[m]) ** 2))

                denom_e = float((cos_rho_v * cos_b) + self.epsilon)
                bar_e = (bar_e - self.rho_v[m] * bar_b_curr[m]) / denom_e

            # De-normalize the a posteriori error for output.
            errors[k] = bar_e * self.xi_half
            outputs[k] = d_in[k] - errors[k]

            # Current backward errors become "previous" for the next sample.
            self.bar_b_prev = bar_b_curr.copy()

            self.w = self.rho_v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[NormalizedLRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "rho": self.rho.copy(),
                "rho_v": self.rho_v.copy(),
                "xi_half": self.xi_half,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Normalized Lattice RLS (NLRLS) based on a posteriori error, complex-valued.
Implements Diniz (Algorithm 7.6). This variant introduces normalized internal variables so that key quantities (normalized forward/backward errors and reflection-like coefficients) are designed to be magnitude-bounded by 1, improving numerical robustness.
The algorithm has two coupled stages:
1) Prediction stage (lattice, order M):
Computes normalized forward/backward a posteriori errors (bar_f, bar_b)
and updates normalized reflection-like coefficients rho.
2) Estimation stage (normalized ladder, length M+1):
Updates normalized coefficients rho_v and produces a normalized
a posteriori error bar_e. The returned error is the de-normalized
error e = bar_e * xi_half.
Library conventions
- Complex-valued implementation (``supports_complex=True``).
- The exposed coefficient vector is ``rho_v`` (length ``M+1``). For compatibility with ``pydaptivefiltering.base.AdaptiveFilter``:
  * ``self.w`` mirrors ``self.rho_v`` at each iteration.
  * history recorded by ``_record_history()`` corresponds to ``rho_v``.
Parameters
filter_order : int
Lattice order M (number of sections). The estimation stage uses
M+1 coefficients.
lambda_factor : float, optional
Forgetting factor lambda used in the exponentially weighted updates.
Default is 0.99.
epsilon : float, optional
Small positive constant used for regularization in normalizations,
magnitude clipping, and denominator protection. Default is 1e-6.
w_init : array_like of complex, optional
Optional initialization for rho_v with length M+1.
If None, initializes with zeros.
denom_floor : float, optional
Extra floor for denominators and sqrt protections. Default is 1e-12.
Notes
Normalized variables
~~~~
The implementation uses the following normalized quantities:
- ``xi_half``: square-root energy tracker (scalar). It normalizes the input/output so that normalized errors stay bounded.
- ``bar_f``: normalized forward error for the current section.
- ``bar_b_prev`` / ``bar_b_curr``: normalized backward error vectors, shape ``(M+1,)``.
- ``bar_e``: normalized a posteriori error in the estimation stage.
- ``rho``: normalized reflection-like coefficients for the lattice stage, shape ``(M,)``.
- ``rho_v``: normalized coefficients for the estimation stage, shape ``(M+1,)``.
Magnitude bounding
~~~~~~
Several variables are clipped to satisfy |z| <= 1. The terms
sqrt(1 - |z|^2) act like cosine factors in the normalized recursions and
are safeguarded with _safe_sqrt to avoid negative arguments caused by
round-off.
Output and error returned
~~~~~
The filter returns the de-normalized a posteriori error:
errors[k] = bar_e[k] * xi_half[k]
and the output estimate:
outputs[k] = d[k] - errors[k].
References
110 def __init__( 111 self, 112 filter_order: int, 113 lambda_factor: float = 0.99, 114 epsilon: float = 1e-6, 115 w_init: Optional[ArrayLike] = None, 116 denom_floor: float = 1e-12, 117 ) -> None: 118 """ 119 Parameters 120 ---------- 121 filter_order: 122 Number of lattice sections M. The estimation stage uses M+1 coefficients. 123 lambda_factor: 124 Forgetting factor λ. 125 epsilon: 126 Regularization used in normalizations and clipping. 127 w_init: 128 Optional initialization for rho_v (length M+1). If None, zeros. 129 denom_floor: 130 Extra floor for denominators / sqrt protections. 131 """ 132 super().__init__(filter_order=filter_order, w_init=w_init) 133 134 self.lam = float(lambda_factor) 135 self.epsilon = float(epsilon) 136 self.n_sections = int(filter_order) 137 self._tiny = float(denom_floor) 138 139 self.rho = np.zeros(self.n_sections, dtype=complex) 140 141 if w_init is not None: 142 rho_v0 = np.asarray(w_init, dtype=complex).reshape(-1) 143 if rho_v0.size != self.n_sections + 1: 144 raise ValueError( 145 f"w_init must have length {self.n_sections + 1}, got {rho_v0.size}" 146 ) 147 self.rho_v = rho_v0 148 else: 149 self.rho_v = np.zeros(self.n_sections + 1, dtype=complex) 150 151 self.bar_b_prev = np.zeros(self.n_sections + 1, dtype=complex) 152 self.xi_half = float(np.sqrt(self.epsilon)) 153 154 self.w = self.rho_v.copy() 155 self.w_history = [] 156 self._record_history()
Parameters
filter_order: Number of lattice sections M. The estimation stage uses M+1 coefficients. lambda_factor: Forgetting factor λ. epsilon: Regularization used in normalizations and clipping. w_init: Optional initialization for rho_v (length M+1). If None, zeros. denom_floor: Extra floor for denominators / sqrt protections.
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the Normalized LRLS (NLRLS) recursion over paired sequences ``x[k]`` and ``d[k]``.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, returns selected *final* internal states in ``result.extra``
            (not full trajectories).

        Returns
        -------
        OptimizationResult
            outputs : ndarray of complex, shape ``(N,)``
                Estimated output sequence ``y[k] = d[k] - e_post[k]``.
            errors : ndarray of complex, shape ``(N,)``
                De-normalized a posteriori error ``e_post[k] = bar_e[k] * xi_half[k]``.
            coefficients : ndarray
                History of ``rho_v`` (mirrors ``self.rho_v`` via ``self.w``).
            error_type : str
                Set to ``"a_posteriori"``.
            extra : dict, optional
                Present only if ``return_internal_states=True`` (see below).

        Extra (when return_internal_states=True)
        --------------------------------------
        rho : ndarray of complex, shape ``(M,)``
            Final normalized lattice reflection-like coefficients.
        rho_v : ndarray of complex, shape ``(M+1,)``
            Final normalized estimation-stage coefficients.
        xi_half : float
            Final square-root energy tracker used for normalization.
        """
        t0 = perf_counter()

        # validate_input already normalizes to 1D and matches lengths.
        # Force complex to respect supports_complex=True (even if x/d are real).
        x_in = np.asarray(input_signal, dtype=complex).ravel()
        d_in = np.asarray(desired_signal, dtype=complex).ravel()

        n_samples = int(d_in.size)
        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        # sqrt(lambda) appears in every normalized reflection-coefficient update below.
        sqrt_lam = float(np.sqrt(self.lam))

        for k in range(n_samples):
            # Update xi_half (sqrt energy): xi^2(k) = lam * xi^2(k-1) + |x[k]|^2.
            xi_sq = float(self.xi_half**2)
            xi_sq = float(self.lam * xi_sq + (np.abs(x_in[k]) ** 2))
            self.xi_half = self._safe_sqrt(xi_sq)

            # Normalize the new input sample by the running sqrt-energy.
            denom_x = float(self.xi_half + self.epsilon)
            bar_f = x_in[k] / denom_x

            # Normalized quantities must stay inside the closed unit disc;
            # clip magnitude so the sqrt(1 - |.|^2) terms below stay real.
            abs_bf = np.abs(bar_f)
            if abs_bf > 1.0:
                bar_f = bar_f / abs_bf

            bar_b_curr = np.zeros(self.n_sections + 1, dtype=complex)
            bar_b_curr[0] = bar_f

            # -------------------------
            # Prediction stage
            # -------------------------
            for m in range(self.n_sections):
                # "cos" factors: sqrt(1 - |normalized error|^2) for each branch.
                cos_f = self._safe_sqrt(1.0 - (np.abs(bar_f) ** 2))
                cos_b_prev = self._safe_sqrt(1.0 - (np.abs(self.bar_b_prev[m]) ** 2))

                self.rho[m] = (
                    (sqrt_lam * cos_f * cos_b_prev * self.rho[m])
                    + (np.conj(bar_f) * self.bar_b_prev[m])
                )

                # Keep the reflection-like coefficient strictly inside the unit disc.
                abs_rho = np.abs(self.rho[m])
                if abs_rho >= 1.0:
                    self.rho[m] = self.rho[m] / (abs_rho + self.epsilon)

                cos_rho = self._safe_sqrt(1.0 - (np.abs(self.rho[m]) ** 2))

                denom_f = float((cos_rho * cos_b_prev) + self.epsilon)
                denom_b = float((cos_rho * cos_f) + self.epsilon)

                # Order-update of the normalized forward/backward errors.
                f_next = (bar_f - self.rho[m] * self.bar_b_prev[m]) / denom_f
                b_next = (self.bar_b_prev[m] - np.conj(self.rho[m]) * bar_f) / denom_b

                bar_f = f_next
                bar_b_curr[m + 1] = b_next

            # -------------------------
            # Estimation stage
            # -------------------------
            # Normalized a priori estimation error, also clipped to the unit disc.
            bar_e = d_in[k] / float(self.xi_half + self.epsilon)
            abs_be = np.abs(bar_e)
            if abs_be > 1.0:
                bar_e = bar_e / abs_be

            for m in range(self.n_sections + 1):
                cos_e = self._safe_sqrt(1.0 - (np.abs(bar_e) ** 2))
                cos_b = self._safe_sqrt(1.0 - (np.abs(bar_b_curr[m]) ** 2))

                self.rho_v[m] = (
                    (sqrt_lam * cos_e * cos_b * self.rho_v[m])
                    + (np.conj(bar_e) * bar_b_curr[m])
                )

                abs_rv = np.abs(self.rho_v[m])
                if abs_rv >= 1.0:
                    self.rho_v[m] = self.rho_v[m] / (abs_rv + self.epsilon)

                cos_rho_v = self._safe_sqrt(1.0 - (np.abs(self.rho_v[m]) ** 2))

                denom_e = float((cos_rho_v * cos_b) + self.epsilon)
                bar_e = (bar_e - self.rho_v[m] * bar_b_curr[m]) / denom_e

            # De-normalize: e_post[k] = bar_e[k] * xi_half[k]; y[k] = d[k] - e_post[k].
            errors[k] = bar_e * self.xi_half
            outputs[k] = d_in[k] - errors[k]

            self.bar_b_prev = bar_b_curr.copy()

            # Mirror rho_v into self.w so the base class records a per-sample
            # coefficient history (see "coefficients" in the docstring).
            self.w = self.rho_v.copy()
            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[NormalizedLRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "rho": self.rho.copy(),
                "rho_v": self.rho_v.copy(),
                "xi_half": self.xi_half,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Run the Normalized LRLS (NLRLS) recursion over paired sequences x[k] and d[k].
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,).
desired_signal : array_like of complex
Desired/reference sequence d[k] with shape (N,).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, returns selected final internal states in result.extra
(not full trajectories).
Returns
OptimizationResult
outputs : ndarray of complex, shape (N,)
Estimated output sequence y[k] = d[k] - e_post[k].
errors : ndarray of complex, shape (N,)
De-normalized a posteriori error e_post[k] = bar_e[k] * xi_half[k].
coefficients : ndarray
History of rho_v (mirrors self.rho_v via self.w).
error_type : str
Set to "a_posteriori".
extra : dict, optional
Present only if return_internal_states=True (see below).
Extra (when return_internal_states=True)
rho : ndarray of complex, shape (M,)
Final normalized lattice reflection-like coefficients.
rho_v : ndarray of complex, shape (M+1,)
Final normalized estimation-stage coefficients.
xi_half : float
Final square-root energy tracker used for normalization.
class FastRLS(AdaptiveFilter):
    """
    Fast Transversal Recursive Least-Squares (FT-RLS) algorithm (complex-valued).

    The Fast Transversal RLS (also called Fast RLS) is a computationally
    efficient alternative to standard RLS. By exploiting shift-structure in the
    regressor and using coupled forward/backward linear prediction recursions,
    it reduces the per-sample complexity from :math:`O(M^2)` (standard RLS) to
    approximately :math:`O(M)`.

    This implementation follows Diniz (Alg. 8.1) and maintains internal state
    for forward and backward predictors, as well as the conversion (likelihood)
    variable :math:`\\gamma(k)` that maps a priori to a posteriori quantities.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``. The number of coefficients is ``M + 1``.
    forgetting_factor : float, optional
        Exponential forgetting factor ``lambda``. Typical values are in
        ``[0.95, 1.0]``; values closer to 1 give longer memory. Default is 0.99.
    epsilon : float, optional
        Positive initialization for the minimum prediction-error energies
        (regularization), used as :math:`\\xi_{\\min}(0)` in the recursions.
        Default is 0.1.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.
    safe_eps : float, optional (keyword-only)
        Small constant used to guard divisions in internal recursions when
        denominators approach zero. Default is 1e-30.

    Notes
    -----
    Convention
    ~~~~~~~~~~
    At time ``k``, the regressor is formed (most recent sample first) as:

    .. math::
        x_k = [x[k], x[k-1], \\ldots, x[k-M]]^T.

    A priori vs a posteriori
    ~~~~~~~~~~~~~~~~~~~~~~~~
    The a priori output and error are:

    .. math::
        y(k) = w^H(k-1) x_k, \\qquad e(k) = d(k) - y(k).

    This implementation also computes the a posteriori error using the
    conversion variable :math:`\\gamma(k)` (from the FT-RLS recursions):

    .. math::
        e_{\\text{post}}(k) = \\gamma(k)\\, e(k), \\qquad
        y_{\\text{post}}(k) = d(k) - e_{\\text{post}}(k).

    The main-filter coefficient update uses the normalized gain-like vector
    produced by the transversal recursions (``phi_hat_n`` in the code):

    .. math::
        w(k) = w(k-1) + \\phi(k)\\, e_{\\text{post}}^*(k),

    where :math:`\\phi(k)` corresponds to the internal vector ``phi_hat_n``.

    Returned internals
    ~~~~~~~~~~~~~~~~~~
    The method always returns a posteriori sequences in ``extra``:
    ``outputs_posteriori`` and ``errors_posteriori``. If
    ``return_internal_states=True``, it additionally returns tracks of
    ``gamma`` and the forward minimum prediction-error energy ``xi_min_f``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
        Implementation*, 5th ed., Algorithm 8.1.
    """
    supports_complex: bool = True
    forgetting_factor: float
    epsilon: float
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        forgetting_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
        *,
        safe_eps: float = 1e-30,
    ) -> None:
        super().__init__(filter_order=filter_order, w_init=w_init)
        self.forgetting_factor = float(forgetting_factor)
        self.epsilon = float(epsilon)
        self.n_coeffs = int(filter_order + 1)
        self._safe_eps = float(safe_eps)

        # FT-RLS is run in complex arithmetic regardless of the init dtype.
        self.w = np.asarray(self.w, dtype=np.complex128)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the FT-RLS adaptation loop.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)`` (will be
            flattened). Must have the same length as ``input_signal``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes additional internal trajectories in
            ``result.extra``:
            - ``"gamma"``: ndarray of float, shape ``(N,)`` with :math:`\\gamma(k)`.
            - ``"xi_min_f"``: ndarray of float, shape ``(N,)`` with the forward
              minimum prediction-error energy :math:`\\xi_{f,\\min}(k)`.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)``
              A priori output sequence ``y[k] = w^H(k-1) x_k``.
            - errors : ndarray of complex, shape ``(N,)``
              A priori error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of complex
              Coefficient history recorded by the base class.
            - error_type : str
              Set to ``"a_priori"``.
            - extra : dict
              Always includes:
              - ``"outputs_posteriori"``: ndarray of complex, shape ``(N,)``.
              - ``"errors_posteriori"``: ndarray of complex, shape ``(N,)``.
              Additionally includes ``"gamma"`` and ``"xi_min_f"`` if
              ``return_internal_states=True``.
        """
        # perf_counter() is monotonic and high-resolution, whereas time.time()
        # follows the (adjustable) wall clock; this matches NormalizedLRLS,
        # which already times with perf_counter. Local import keeps the fix
        # self-contained.
        from time import perf_counter

        tic: float = perf_counter()

        x: np.ndarray = np.asarray(input_signal, dtype=np.complex128).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=np.complex128).ravel()

        n_samples: int = int(x.size)
        m_plus_1: int = int(self.filter_order + 1)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
        outputs_post: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
        errors_post: np.ndarray = np.zeros(n_samples, dtype=np.complex128)

        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None

        # Forward/backward predictors and the normalized gain-like vector.
        w_f: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)
        w_b: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)
        phi_hat_n: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)

        gamma_n: float = 1.0
        xi_min_f_prev: float = float(self.epsilon)
        xi_min_b: float = float(self.epsilon)

        # Zero-prepend so the regressor window is fully defined for k < M+1.
        x_padded: np.ndarray = np.zeros(n_samples + m_plus_1, dtype=np.complex128)
        x_padded[m_plus_1:] = x

        lam = float(self.forgetting_factor)
        eps = float(self._safe_eps)

        for k in range(n_samples):
            # Length M+2 window, most recent sample first (regressor[0] = x[k]).
            regressor: np.ndarray = x_padded[k : k + m_plus_1 + 1][::-1]

            # Forward prediction: a priori error and gamma-converted a posteriori error.
            e_f_priori: np.complex128 = regressor[0] - np.dot(w_f.conj(), regressor[1:])
            e_f_post: np.complex128 = e_f_priori * gamma_n

            xi_min_f_curr: float = float(lam * xi_min_f_prev + np.real(e_f_priori * np.conj(e_f_post)))

            # Guarded division: keep denominator away from zero with sign preserved.
            den_phi = lam * xi_min_f_prev
            if abs(den_phi) < eps:
                den_phi = np.copysign(eps, den_phi if den_phi != 0 else 1.0)
            phi_gain: np.complex128 = e_f_priori / den_phi

            # Order-extended gain vector (length M+2).
            phi_hat_n_plus_1: np.ndarray = np.zeros(m_plus_1 + 1, dtype=np.complex128)
            phi_hat_n_plus_1[1:] = phi_hat_n
            phi_hat_n_plus_1[0] += phi_gain
            phi_hat_n_plus_1[1:] -= phi_gain * w_f

            w_f = w_f + phi_hat_n * np.conj(e_f_post)

            den_g = xi_min_f_curr
            if abs(den_g) < eps:
                den_g = np.copysign(eps, den_g if den_g != 0 else 1.0)
            gamma_n_plus_1: float = float((lam * xi_min_f_prev * gamma_n) / den_g)

            # Backward prediction quantities.
            e_b_priori: np.complex128 = lam * xi_min_b * phi_hat_n_plus_1[-1]

            den_gamma = np.real((1.0 / gamma_n_plus_1) - (phi_hat_n_plus_1[-1] * np.conj(e_b_priori)))
            if abs(den_gamma) < eps:
                den_gamma = np.copysign(eps, den_gamma if den_gamma != 0 else 1.0)
            gamma_n = float(1.0 / den_gamma)

            e_b_post: np.complex128 = e_b_priori * gamma_n
            xi_min_b = float(lam * xi_min_b + np.real(e_b_post * np.conj(e_b_priori)))

            # Order-reduce the gain vector back to length M+1.
            phi_hat_n = phi_hat_n_plus_1[:-1] + phi_hat_n_plus_1[-1] * w_b
            w_b = w_b + phi_hat_n * np.conj(e_b_post)

            # Joint-process (main filter) a priori output/error.
            y_k: np.complex128 = np.dot(self.w.conj(), regressor[:-1])
            outputs[k] = y_k

            e_k: np.complex128 = d[k] - y_k
            errors[k] = e_k

            errors_post[k] = e_k * gamma_n
            outputs_post[k] = d[k] - errors_post[k]

            self.w = self.w + phi_hat_n * np.conj(errors_post[k])
            self._record_history()

            if return_internal_states and gamma_track is not None and xi_f_track is not None:
                gamma_track[k] = gamma_n
                xi_f_track[k] = xi_min_f_curr

            xi_min_f_prev = xi_min_f_curr

        runtime_s: float = float(perf_counter() - tic)
        if verbose:
            print(f"[FastRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "outputs_posteriori": outputs_post,
            "errors_posteriori": errors_post,
        }
        if return_internal_states:
            extra.update({"gamma": gamma_track, "xi_min_f": xi_f_track})

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Fast Transversal Recursive Least-Squares (FT-RLS) algorithm (complex-valued).
The Fast Transversal RLS (also called Fast RLS) is a computationally efficient alternative to standard RLS. By exploiting shift-structure in the regressor and using coupled forward/backward linear prediction recursions, it reduces the per-sample complexity from \( O(M^2) \) (standard RLS) to approximately \( O(M) \).
This implementation follows Diniz (Alg. 8.1) and maintains internal state for forward and backward predictors, as well as the conversion (likelihood) variable \( \gamma(k) \) that maps a priori to a posteriori quantities.
Parameters
filter_order : int
FIR filter order M. The number of coefficients is M + 1.
forgetting_factor : float, optional
Exponential forgetting factor lambda. Typical values are in
[0.95, 1.0]; values closer to 1 give longer memory. Default is 0.99.
epsilon : float, optional
Positive initialization for the minimum prediction-error energies
(regularization), used as \( \xi_{\min}(0) \) in the recursions.
Default is 0.1.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
safe_eps : float, optional (keyword-only)
Small constant used to guard divisions in internal recursions when
denominators approach zero. Default is 1e-30.
Notes
Convention
~~~~~~~~~~
At time k, the regressor is formed (most recent sample first) as:
$$x_k = [x[k], x[k-1], \ldots, x[k-M]]^T.$$
A priori vs a posteriori
~~~~~~~~~~~~~~~~~~~~~~~~
The a priori output and error are:
$$y(k) = w^H(k-1) x_k, \qquad e(k) = d(k) - y(k).$$
This implementation also computes the a posteriori error using the conversion variable \( \gamma(k) \) (from the FT-RLS recursions):
$$e_{\text{post}}(k) = \gamma(k)\, e(k), \qquad y_{\text{post}}(k) = d(k) - e_{\text{post}}(k).$$
The main-filter coefficient update uses the normalized gain-like vector
produced by the transversal recursions (phi_hat_n in the code):
$$w(k) = w(k-1) + \phi(k)\, e_{\text{post}}^*(k),$$
where \( \phi(k) \) corresponds to the internal vector phi_hat_n.
Returned internals
~~~~~~~~~~~~~~~~~~
The method always returns a posteriori sequences in extra:
outputs_posteriori and errors_posteriori. If
return_internal_states=True, it additionally returns tracks of
gamma and the forward minimum prediction-error energy xi_min_f.
References
    # References: Diniz, Adaptive Filtering, 5th ed., Algorithm 8.1.
    # NOTE(review): this span is a verbatim duplicate of FastRLS.__init__ and
    # FastRLS.optimize as defined in the class body earlier in the file —
    # looks like an extraction artifact; confirm against the repository and
    # deduplicate.
    def __init__(
        self,
        filter_order: int,
        forgetting_factor: float = 0.99,
        epsilon: float = 0.1,
        w_init: Optional[Union[np.ndarray, list]] = None,
        *,
        safe_eps: float = 1e-30,
    ) -> None:
        super().__init__(filter_order=filter_order, w_init=w_init)
        self.forgetting_factor = float(forgetting_factor)
        self.epsilon = float(epsilon)
        self.n_coeffs = int(filter_order + 1)
        self._safe_eps = float(safe_eps)

        # FT-RLS is run in complex arithmetic regardless of the init dtype.
        self.w = np.asarray(self.w, dtype=np.complex128)

    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the FT-RLS adaptation loop.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of complex
            Desired/reference sequence ``d[k]`` with shape ``(N,)`` (will be
            flattened). Must have the same length as ``input_signal``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes additional internal trajectories in
            ``result.extra``:
            - ``"gamma"``: ndarray of float, shape ``(N,)`` with :math:`\\gamma(k)`.
            - ``"xi_min_f"``: ndarray of float, shape ``(N,)`` with the forward
              minimum prediction-error energy :math:`\\xi_{f,\\min}(k)`.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)``
              A priori output sequence ``y[k] = w^H(k-1) x_k``.
            - errors : ndarray of complex, shape ``(N,)``
              A priori error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of complex
              Coefficient history recorded by the base class.
            - error_type : str
              Set to ``"a_priori"``.
            - extra : dict
              Always includes:
              - ``"outputs_posteriori"``: ndarray of complex, shape ``(N,)``.
              - ``"errors_posteriori"``: ndarray of complex, shape ``(N,)``.
              Additionally includes ``"gamma"`` and ``"xi_min_f"`` if
              ``return_internal_states=True``.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.complex128).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=np.complex128).ravel()

        n_samples: int = int(x.size)
        m_plus_1: int = int(self.filter_order + 1)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
        outputs_post: np.ndarray = np.zeros(n_samples, dtype=np.complex128)
        errors_post: np.ndarray = np.zeros(n_samples, dtype=np.complex128)

        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None
        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=float) if return_internal_states else None

        # Forward/backward predictors and the normalized gain-like vector.
        w_f: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)
        w_b: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)
        phi_hat_n: np.ndarray = np.zeros(m_plus_1, dtype=np.complex128)

        gamma_n: float = 1.0
        xi_min_f_prev: float = float(self.epsilon)
        xi_min_b: float = float(self.epsilon)

        # Zero-prepend so the regressor window is fully defined for k < M+1.
        x_padded: np.ndarray = np.zeros(n_samples + m_plus_1, dtype=np.complex128)
        x_padded[m_plus_1:] = x

        lam = float(self.forgetting_factor)
        eps = float(self._safe_eps)

        for k in range(n_samples):
            # Length M+2 window, most recent sample first (regressor[0] = x[k]).
            regressor: np.ndarray = x_padded[k : k + m_plus_1 + 1][::-1]

            e_f_priori: np.complex128 = regressor[0] - np.dot(w_f.conj(), regressor[1:])
            e_f_post: np.complex128 = e_f_priori * gamma_n

            xi_min_f_curr: float = float(lam * xi_min_f_prev + np.real(e_f_priori * np.conj(e_f_post)))

            # Guarded division: keep denominator away from zero, sign preserved.
            den_phi = lam * xi_min_f_prev
            if abs(den_phi) < eps:
                den_phi = np.copysign(eps, den_phi if den_phi != 0 else 1.0)
            phi_gain: np.complex128 = e_f_priori / den_phi

            phi_hat_n_plus_1: np.ndarray = np.zeros(m_plus_1 + 1, dtype=np.complex128)
            phi_hat_n_plus_1[1:] = phi_hat_n
            phi_hat_n_plus_1[0] += phi_gain
            phi_hat_n_plus_1[1:] -= phi_gain * w_f

            w_f = w_f + phi_hat_n * np.conj(e_f_post)

            den_g = xi_min_f_curr
            if abs(den_g) < eps:
                den_g = np.copysign(eps, den_g if den_g != 0 else 1.0)
            gamma_n_plus_1: float = float((lam * xi_min_f_prev * gamma_n) / den_g)

            e_b_priori: np.complex128 = lam * xi_min_b * phi_hat_n_plus_1[-1]

            den_gamma = np.real((1.0 / gamma_n_plus_1) - (phi_hat_n_plus_1[-1] * np.conj(e_b_priori)))
            if abs(den_gamma) < eps:
                den_gamma = np.copysign(eps, den_gamma if den_gamma != 0 else 1.0)
            gamma_n = float(1.0 / den_gamma)

            e_b_post: np.complex128 = e_b_priori * gamma_n
            xi_min_b = float(lam * xi_min_b + np.real(e_b_post * np.conj(e_b_priori)))

            # Order-reduce the gain vector back to length M+1.
            phi_hat_n = phi_hat_n_plus_1[:-1] + phi_hat_n_plus_1[-1] * w_b
            w_b = w_b + phi_hat_n * np.conj(e_b_post)

            # Joint-process (main filter) a priori output/error.
            y_k: np.complex128 = np.dot(self.w.conj(), regressor[:-1])
            outputs[k] = y_k

            e_k: np.complex128 = d[k] - y_k
            errors[k] = e_k

            errors_post[k] = e_k * gamma_n
            outputs_post[k] = d[k] - errors_post[k]

            self.w = self.w + phi_hat_n * np.conj(errors_post[k])
            self._record_history()

            if return_internal_states and gamma_track is not None and xi_f_track is not None:
                gamma_track[k] = gamma_n
                xi_f_track[k] = xi_min_f_curr

            xi_min_f_prev = xi_min_f_curr

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[FastRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {
            "outputs_posteriori": outputs_post,
            "errors_posteriori": errors_post,
        }
        if return_internal_states:
            extra.update({"gamma": gamma_track, "xi_min_f": xi_f_track})

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Executes the FT-RLS adaptation loop.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of complex
Desired/reference sequence d[k] with shape (N,) (will be
flattened). Must have the same length as input_signal.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes additional internal trajectories in
result.extra:
- "gamma": ndarray of float, shape (N,) with \( \gamma(k) \).
- "xi_min_f": ndarray of float, shape (N,) with the forward
minimum prediction-error energy \( \xi_{f,\min}(k) \).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
A priori output sequence y[k] = w^H(k-1) x_k.
- errors : ndarray of complex, shape (N,)
A priori error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict
Always includes:
- "outputs_posteriori": ndarray of complex, shape (N,).
- "errors_posteriori": ndarray of complex, shape (N,).
Additionally includes "gamma" and "xi_min_f" if
return_internal_states=True.
class StabFastRLS(AdaptiveFilter):
    """
    Stabilized Fast Transversal RLS (SFT-RLS) algorithm (real-valued).

    The Stabilized Fast Transversal RLS is a numerically robust variant of the
    Fast Transversal RLS. It preserves the approximately :math:`O(M)` per-sample
    complexity of transversal RLS recursions while improving stability in
    finite-precision arithmetic by introducing feedback stabilization in the
    backward prediction recursion (via ``kappa1``, ``kappa2``, ``kappa3``) and by
    guarding divisions/energies through floors and optional clipping.

    This implementation corresponds to Diniz (Alg. 8.2) and is restricted to
    **real-valued** input/desired sequences (enforced by ``ensure_real_signals``).

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``. The number of coefficients is ``M + 1``.
    forgetting_factor : float, optional
        Exponential forgetting factor ``lambda``. Default is 0.99.
    epsilon : float, optional
        Positive initialization for the minimum prediction-error energies
        (regularization), used as :math:`\\xi_{\\min}(0)` in the recursions.
        Default is 1e-1.
    kappa1, kappa2, kappa3 : float, optional
        Stabilization constants used to form stabilized versions of the backward
        prediction error. Defaults are 1.5, 2.5, and 1.0.
    w_init : array_like of float, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.
    denom_floor : float, optional
        Safety floor used to clamp denominators before inversion to prevent
        overflow/underflow and non-finite values during internal recursions.
        If None, a small value based on machine ``tiny`` is used.
    xi_floor : float, optional
        Safety floor for prediction error energies (e.g., ``xi_min_f``,
        ``xi_min_b``). If None, a small value based on machine ``tiny`` is used.
    gamma_clip : float, optional
        Optional clipping threshold applied to an intermediate conversion factor
        to avoid extreme values (singularities). If None, no clipping is applied.

    Notes
    -----
    Convention
    ~~~~~~~~~~
    At time ``k``, the internal regressor window has length ``M + 2`` (denoted
    ``r`` in the code) and is formed in reverse order (most recent sample first).
    The main adaptive filter uses the first ``M + 1`` entries of this window.

    A priori vs a posteriori
    ~~~~~~~~~~~~~~~~~~~~~~~~
    The a priori output and error are:

    .. math::
        y(k) = w^T(k-1) x_k, \\qquad e(k) = d(k) - y(k),

    and the a posteriori error returned by this implementation is:

    .. math::
        e_{\\text{post}}(k) = \\gamma(k)\\, e(k),

    where :math:`\\gamma(k)` is produced by the stabilized transversal recursions.

    Stabilization with kappa
    ~~~~~~~~~~~~~~~~~~~~~~~~
    The algorithm forms stabilized backward-error combinations (three variants)
    from two backward-error lines in the recursion (named ``e_b_line1`` and
    ``e_b_line2`` in the code). Conceptually:

    .. math::
        e_{b,i}(k) = \\kappa_i\\, e_{b,2}(k) + (1-\\kappa_i)\\, e_{b,1}(k),

    for :math:`\\kappa_i \\in \\{\\kappa_1, \\kappa_2, \\kappa_3\\}`.

    Numerical safeguards
    ~~~~~~~~~~~~~~~~~~~~
    Several denominators are clamped to ``denom_floor`` before inversion and
    minimum energies are floored by ``xi_floor``. The counts of clamp events are
    tracked and returned in ``extra["clamp_stats"]``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
        Implementation*, 5th ed., Algorithm 8.2.
    """
    supports_complex: bool = False
    lambda_: float
    epsilon: float
    kappa1: float
    kappa2: float
    kappa3: float
    denom_floor: float
    xi_floor: float
    gamma_clip: Optional[float]
    n_coeffs: int

    def __init__(
        self,
        filter_order: int,
        forgetting_factor: float = 0.99,
        epsilon: float = 1e-1,
        kappa1: float = 1.5,
        kappa2: float = 2.5,
        kappa3: float = 1.0,
        w_init: Optional[Union[np.ndarray, list]] = None,
        denom_floor: Optional[float] = None,
        xi_floor: Optional[float] = None,
        gamma_clip: Optional[float] = None,
    ) -> None:
        super().__init__(filter_order=filter_order, w_init=w_init)

        self.filter_order = int(filter_order)
        self.n_coeffs = int(self.filter_order + 1)
        self.lambda_ = float(forgetting_factor)
        self.epsilon = float(epsilon)
        self.kappa1 = float(kappa1)
        self.kappa2 = float(kappa2)
        self.kappa3 = float(kappa3)

        # Default floors are scaled multiples of the smallest normal float64,
        # so un-clamped recursions are unaffected in normal operation.
        finfo = np.finfo(np.float64)
        self.denom_floor = float(denom_floor) if denom_floor is not None else float(finfo.tiny * 1e3)
        self.xi_floor = float(xi_floor) if xi_floor is not None else float(finfo.tiny * 1e6)
        self.gamma_clip = float(gamma_clip) if gamma_clip is not None else None

        self.w = np.asarray(self.w, dtype=np.float64)

    @staticmethod
    def _clamp_denom(den: float, floor: float) -> float:
        # Replace non-finite or too-small denominators by +/- floor (sign kept).
        if (not np.isfinite(den)) or (abs(den) < floor):
            return float(np.copysign(floor, den if den != 0 else 1.0))
        return float(den)

    def _safe_inv(self, den: float, floor: float, clamp_counter: Dict[str, int], key: str) -> float:
        # Guarded reciprocal; counts clamp events per named denominator.
        den_clamped = self._clamp_denom(den, floor)
        if den_clamped != den:
            clamp_counter[key] = clamp_counter.get(key, 0) + 1
        return 1.0 / den_clamped

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the stabilized FT-RLS adaptation loop (real-valued).

        Parameters
        ----------
        input_signal : array_like of float
            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of float
            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
            Must have the same length as ``input_signal``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes internal trajectories in ``result.extra``:
            - ``"xi_min_f"``: ndarray of float, shape ``(N,)`` (forward minimum
              prediction-error energy).
            - ``"xi_min_b"``: ndarray of float, shape ``(N,)`` (backward minimum
              prediction-error energy).
            - ``"gamma"``: ndarray of float, shape ``(N,)`` (conversion factor).

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
              A priori output sequence ``y[k]``.
            - errors : ndarray of float, shape ``(N,)``
              A priori error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of float
              Coefficient history recorded by the base class.
            - error_type : str
              Set to ``"a_priori"``.
            - extra : dict
              Always includes:
              - ``"errors_posteriori"``: ndarray of float, shape ``(N,)`` with
                :math:`e_{\\text{post}}(k)`.
              - ``"clamp_stats"``: dict with counters of denominator clamps.
              Additionally includes ``"xi_min_f"``, ``"xi_min_b"``, and
              ``"gamma"`` if ``return_internal_states=True``.
        """
        # perf_counter() is monotonic and high-resolution, whereas time.time()
        # follows the (adjustable) wall clock; matches NormalizedLRLS. Local
        # import keeps the fix self-contained.
        from time import perf_counter

        tic: float = perf_counter()

        # .ravel() added for consistency with FastRLS/NormalizedLRLS so any
        # (N, 1)-shaped input collapses to the documented (N,) shape.
        x: np.ndarray = np.asarray(input_signal, dtype=np.float64).ravel()
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64).ravel()

        n_samples: int = int(x.size)
        n_taps: int = int(self.filter_order + 1)
        reg_len: int = int(self.filter_order + 2)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors_post: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        xi_min_f: float = float(self.epsilon)
        xi_min_b: float = float(self.epsilon)
        gamma_n_3: float = 1.0

        xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        xi_b_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Forward/backward predictors and gain vectors (orders M+1 and M+2).
        w_f: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        w_b: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        phi_hat_n: np.ndarray = np.zeros(n_taps, dtype=np.float64)
        phi_hat_np1: np.ndarray = np.zeros(reg_len, dtype=np.float64)

        # Zero-prepend so the regressor window is fully defined for k < M+1.
        x_padded: np.ndarray = np.zeros(n_samples + n_taps, dtype=np.float64)
        x_padded[n_taps:] = x

        clamp_counter: Dict[str, int] = {}

        for k in range(n_samples):
            # Length M+2 window, most recent sample first (r[0] = x[k]).
            r: np.ndarray = x_padded[k : k + reg_len][::-1]

            # Forward prediction errors.
            e_f_priori: float = float(r[0] - np.dot(w_f, r[1:]))
            e_f_post: float = float(e_f_priori * gamma_n_3)

            scale: float = self._safe_inv(self.lambda_ * xi_min_f, self.denom_floor, clamp_counter, "inv_lam_xi_f")
            phi_hat_np1[0] = scale * e_f_priori
            phi_hat_np1[1:] = phi_hat_n - phi_hat_np1[0] * w_f

            inv_g3: float = self._safe_inv(gamma_n_3, self.denom_floor, clamp_counter, "inv_g3")
            gamma_np1_1: float = self._safe_inv(
                inv_g3 + phi_hat_np1[0] * e_f_priori, self.denom_floor, clamp_counter, "inv_g_np1"
            )

            if self.gamma_clip is not None:
                gamma_np1_1 = float(np.clip(gamma_np1_1, -self.gamma_clip, self.gamma_clip))

            inv_xi_f_lam: float = self._safe_inv(
                xi_min_f * self.lambda_, self.denom_floor, clamp_counter, "inv_xi_f"
            )
            xi_min_f = max(
                self._safe_inv(
                    inv_xi_f_lam - gamma_np1_1 * (phi_hat_np1[0] ** 2),
                    self.denom_floor,
                    clamp_counter,
                    "inv_den_xi_f",
                ),
                self.xi_floor,
            )
            w_f += phi_hat_n * e_f_post

            # Two backward-error lines combined with the kappas (stabilization).
            e_b_line1: float = float(self.lambda_ * xi_min_b * phi_hat_np1[-1])
            e_b_line2: float = float(r[-1] - np.dot(w_b, r[:-1]))

            eb3_1: float = float(e_b_line2 * self.kappa1 + e_b_line1 * (1.0 - self.kappa1))
            eb3_2: float = float(e_b_line2 * self.kappa2 + e_b_line1 * (1.0 - self.kappa2))
            eb3_3: float = float(e_b_line2 * self.kappa3 + e_b_line1 * (1.0 - self.kappa3))

            inv_g_np1_1: float = self._safe_inv(gamma_np1_1, self.denom_floor, clamp_counter, "inv_g_np1_1")
            gamma_n_2: float = self._safe_inv(
                inv_g_np1_1 - phi_hat_np1[-1] * eb3_3, self.denom_floor, clamp_counter, "inv_g_n2"
            )

            xi_min_b = max(
                float(self.lambda_ * xi_min_b + (eb3_2 * gamma_n_2) * eb3_2),
                self.xi_floor,
            )

            # Order-reduce the gain vector back to length M+1.
            phi_hat_n = phi_hat_np1[:-1] + phi_hat_np1[-1] * w_b
            w_b += phi_hat_n * (eb3_1 * gamma_n_2)

            gamma_n_3 = self._safe_inv(
                1.0 + float(np.dot(phi_hat_n, r[:-1])),
                self.denom_floor,
                clamp_counter,
                "inv_g_n3",
            )

            # Joint-process (main filter) a priori output/error.
            y_k: float = float(np.dot(self.w, r[:-1]))
            outputs[k] = y_k
            e_k: float = float(d[k] - y_k)
            errors[k] = e_k
            e_post_k: float = float(e_k * gamma_n_3)
            errors_post[k] = e_post_k

            self.w += phi_hat_n * e_post_k
            self._record_history()

            if return_internal_states and xi_f_track is not None:
                xi_f_track[k], xi_b_track[k], gamma_track[k] = xi_min_f, xi_min_b, gamma_n_3

        runtime_s: float = float(perf_counter() - tic)
        if verbose:
            print(f"[StabFastRLS] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {"errors_posteriori": errors_post, "clamp_stats": clamp_counter}
        if return_internal_states:
            extra.update({"xi_min_f": xi_f_track, "xi_min_b": xi_b_track, "gamma": gamma_track})

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Stabilized Fast Transversal RLS (SFT-RLS) algorithm (real-valued).
The Stabilized Fast Transversal RLS is a numerically robust variant of the
Fast Transversal RLS. It preserves the approximately \( O(M) \) per-sample
complexity of transversal RLS recursions while improving stability in
finite-precision arithmetic by introducing feedback stabilization in the
backward prediction recursion (via kappa1, kappa2, kappa3) and by
guarding divisions/energies through floors and optional clipping.
This implementation corresponds to Diniz (Alg. 8.2) and is restricted to
real-valued input/desired sequences (enforced by ensure_real_signals).
Parameters
filter_order : int
FIR filter order M. The number of coefficients is M + 1.
forgetting_factor : float, optional
Exponential forgetting factor lambda. Default is 0.99.
epsilon : float, optional
Positive initialization for the minimum prediction-error energies
(regularization), used as \( \xi_{\min}(0) \) in the recursions.
Default is 1e-1.
kappa1, kappa2, kappa3 : float, optional
Stabilization constants used to form stabilized versions of the backward
prediction error. Defaults are 1.5, 2.5, and 1.0.
w_init : array_like of float, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
denom_floor : float, optional
Safety floor used to clamp denominators before inversion to prevent
overflow/underflow and non-finite values during internal recursions.
If None, a small value based on machine tiny is used.
xi_floor : float, optional
Safety floor for prediction error energies (e.g., xi_min_f,
xi_min_b). If None, a small value based on machine tiny is used.
gamma_clip : float, optional
Optional clipping threshold applied to an intermediate conversion factor
to avoid extreme values (singularities). If None, no clipping is applied.
Notes
Convention
~~~~~~~~~~
At time k, the internal regressor window has length M + 2 (denoted
r in the code) and is formed in reverse order (most recent sample first).
The main adaptive filter uses the first M + 1 entries of this window.
A priori vs a posteriori
~~~~~~~~~~~~~~~~~~~~~~~~
The a priori output and error are:
$$y(k) = w^T(k-1) x_k, \qquad e(k) = d(k) - y(k),$$
and the a posteriori error returned by this implementation is:
$$e_{\text{post}}(k) = \gamma(k)\, e(k),$$
where \( \gamma(k) \) is produced by the stabilized transversal recursions.
Stabilization with kappa
~~~~~~~~~~~~~~~~~~~~~~~~
The algorithm forms stabilized backward-error combinations (three variants)
from two backward-error lines in the recursion (named e_b_line1 and
e_b_line2 in the code). Conceptually:
$$e_{b,i}(k) = \kappa_i\, e_{b,2}(k) + (1-\kappa_i)\, e_{b,1}(k),$$
for \( \kappa_i \in \{\kappa_1, \kappa_2, \kappa_3\} \).
Numerical safeguards
~~~~~~~~~~~~~~~~~~~~
Several denominators are clamped to denom_floor before inversion and
minimum energies are floored by xi_floor. The counts of clamp events are
tracked and returned in extra["clamp_stats"].
References
P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 8.2.
def __init__(
    self,
    filter_order: int,
    forgetting_factor: float = 0.99,
    epsilon: float = 1e-1,
    kappa1: float = 1.5,
    kappa2: float = 2.5,
    kappa3: float = 1.0,
    w_init: Optional[Union[np.ndarray, list]] = None,
    denom_floor: Optional[float] = None,
    xi_floor: Optional[float] = None,
    gamma_clip: Optional[float] = None,
) -> None:
    """
    Initializes the stabilized FT-RLS filter state.

    Parameters
    ----------
    filter_order : int
        FIR filter order ``M``; the filter uses ``M + 1`` coefficients.
    forgetting_factor : float, optional
        Exponential forgetting factor ``lambda`` with ``0 < lambda <= 1``.
        Default is 0.99.
    epsilon : float, optional
        Strictly positive initialization for the minimum prediction-error
        energies. Default is 1e-1.
    kappa1, kappa2, kappa3 : float, optional
        Stabilization constants combined into the backward prediction error.
    w_init : array_like of float, optional
        Initial coefficient vector ``w(0)``; zeros if None.
    denom_floor : float, optional
        Floor applied to denominators before inversion. If None, a small
        multiple of the float64 tiny value is used.
    xi_floor : float, optional
        Floor applied to the prediction-error energies. If None, a small
        multiple of the float64 tiny value is used.
    gamma_clip : float, optional
        Optional clipping threshold for an intermediate conversion factor;
        no clipping when None.

    Raises
    ------
    ValueError
        If ``forgetting_factor`` is outside ``(0, 1]`` or ``epsilon`` is
        not strictly positive.
    """
    super().__init__(filter_order=filter_order, w_init=w_init)

    self.filter_order = int(filter_order)
    self.n_coeffs = int(self.filter_order + 1)

    self.lambda_ = float(forgetting_factor)
    # Validate eagerly (consistent with QRRLS.__init__): an invalid lambda
    # would otherwise only surface later as NaNs/overflow in the recursions.
    if not (0.0 < self.lambda_ <= 1.0):
        raise ValueError(
            f"forgetting_factor must satisfy 0 < lambda <= 1. Got {self.lambda_}."
        )

    self.epsilon = float(epsilon)
    # epsilon seeds xi_min_f / xi_min_b, which are inverted in the loop,
    # so a non-positive value is never meaningful.
    if self.epsilon <= 0.0:
        raise ValueError(f"epsilon must be strictly positive. Got {self.epsilon}.")

    self.kappa1 = float(kappa1)
    self.kappa2 = float(kappa2)
    self.kappa3 = float(kappa3)

    # Default numerical floors are tied to the float64 range so that they
    # act purely as overflow/underflow guards, not as meaningful biases.
    finfo = np.finfo(np.float64)
    self.denom_floor = float(denom_floor) if denom_floor is not None else float(finfo.tiny * 1e3)
    self.xi_floor = float(xi_floor) if xi_floor is not None else float(finfo.tiny * 1e6)
    self.gamma_clip = float(gamma_clip) if gamma_clip is not None else None

    # The base class may have stored w with another dtype; force real float64.
    self.w = np.asarray(self.w, dtype=np.float64)
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the stabilized FT-RLS adaptation loop (real-valued).

    Parameters
    ----------
    input_signal : array_like of float
        Real-valued input sequence ``x[k]`` with shape ``(N,)``.
    desired_signal : array_like of float
        Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
        Must have the same length as ``input_signal``.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes internal trajectories in ``result.extra``:
        - ``"xi_min_f"``: ndarray of float, shape ``(N,)`` (forward minimum
          prediction-error energy).
        - ``"xi_min_b"``: ndarray of float, shape ``(N,)`` (backward minimum
          prediction-error energy).
        - ``"gamma"``: ndarray of float, shape ``(N,)`` (conversion factor).

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of float, shape ``(N,)``
            A priori output sequence ``y[k]``.
        - errors : ndarray of float, shape ``(N,)``
            A priori error sequence ``e[k] = d[k] - y[k]``.
        - coefficients : ndarray of float
            Coefficient history recorded by the base class.
        - error_type : str
            Set to ``"a_priori"``.
        - extra : dict
            Always includes:
            - ``"errors_posteriori"``: ndarray of float, shape ``(N,)`` with
              :math:`e_{\\text{post}}(k)`.
            - ``"clamp_stats"``: dict with counters of denominator clamps.
            Additionally includes ``"xi_min_f"``, ``"xi_min_b"``, and
            ``"gamma"`` if ``return_internal_states=True``.
    """
    tic: float = time()

    # Work on float64 copies; @ensure_real_signals guarantees real input.
    x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
    d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)

    n_samples: int = int(x.size)
    n_taps: int = int(self.filter_order + 1)   # order-M filter: M + 1 taps
    reg_len: int = int(self.filter_order + 2)  # extended window of length M + 2

    outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors_post: np.ndarray = np.zeros(n_samples, dtype=np.float64)

    # Minimum prediction-error energies seeded by epsilon; gamma starts at 1.
    xi_min_f: float = float(self.epsilon)
    xi_min_b: float = float(self.epsilon)
    gamma_n_3: float = 1.0

    # Trajectory buffers are allocated only when the caller asks for them.
    xi_f_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
    xi_b_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
    gamma_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

    # Forward/backward predictors (order M) and gain-like vectors
    # (phi_hat_n of length M+1, phi_hat_np1 of length M+2).
    w_f: np.ndarray = np.zeros(n_taps, dtype=np.float64)
    w_b: np.ndarray = np.zeros(n_taps, dtype=np.float64)
    phi_hat_n: np.ndarray = np.zeros(n_taps, dtype=np.float64)
    phi_hat_np1: np.ndarray = np.zeros(reg_len, dtype=np.float64)

    # Zero-padded input so the first regressor windows are fully defined.
    x_padded: np.ndarray = np.zeros(n_samples + n_taps, dtype=np.float64)
    x_padded[n_taps:] = x

    # Counts how often each labeled denominator had to be clamped by _safe_inv.
    clamp_counter: Dict[str, int] = {}

    for k in range(n_samples):
        # Regressor window of length M + 2, most recent sample first.
        r: np.ndarray = x_padded[k : k + reg_len][::-1]

        # --- Forward prediction: a priori error and its a posteriori
        # version obtained through the conversion factor gamma. ---
        e_f_priori: float = float(r[0] - np.dot(w_f, r[1:]))
        e_f_post: float = float(e_f_priori * gamma_n_3)

        # Order-update of the extended gain vector from the forward error.
        scale: float = self._safe_inv(self.lambda_ * xi_min_f, self.denom_floor, clamp_counter, "inv_lam_xi_f")
        phi_hat_np1[0] = scale * e_f_priori
        phi_hat_np1[1:] = phi_hat_n - phi_hat_np1[0] * w_f

        # Conversion-factor order-update (all denominators clamped).
        inv_g3: float = self._safe_inv(gamma_n_3, self.denom_floor, clamp_counter, "inv_g3")
        gamma_np1_1: float = self._safe_inv(
            inv_g3 + phi_hat_np1[0] * e_f_priori, self.denom_floor, clamp_counter, "inv_g_np1"
        )

        # Optional guard against extreme intermediate conversion factors.
        if self.gamma_clip is not None:
            gamma_np1_1 = float(np.clip(gamma_np1_1, -self.gamma_clip, self.gamma_clip))

        # Forward minimum-energy update, floored so it never collapses to ~0.
        inv_xi_f_lam: float = self._safe_inv(
            xi_min_f * self.lambda_, self.denom_floor, clamp_counter, "inv_xi_f"
        )
        xi_min_f = max(
            self._safe_inv(
                inv_xi_f_lam - gamma_np1_1 * (phi_hat_np1[0] ** 2),
                self.denom_floor,
                clamp_counter,
                "inv_den_xi_f",
            ),
            self.xi_floor,
        )
        w_f += phi_hat_n * e_f_post

        # --- Backward prediction: two redundant computations of the
        # backward a priori error; their kappa-weighted combinations give
        # the stabilized variants (see class docstring). ---
        e_b_line1: float = float(self.lambda_ * xi_min_b * phi_hat_np1[-1])
        e_b_line2: float = float(r[-1] - np.dot(w_b, r[:-1]))

        eb3_1: float = float(e_b_line2 * self.kappa1 + e_b_line1 * (1.0 - self.kappa1))
        eb3_2: float = float(e_b_line2 * self.kappa2 + e_b_line1 * (1.0 - self.kappa2))
        eb3_3: float = float(e_b_line2 * self.kappa3 + e_b_line1 * (1.0 - self.kappa3))

        inv_g_np1_1: float = self._safe_inv(gamma_np1_1, self.denom_floor, clamp_counter, "inv_g_np1_1")
        gamma_n_2: float = self._safe_inv(
            inv_g_np1_1 - phi_hat_np1[-1] * eb3_3, self.denom_floor, clamp_counter, "inv_g_n2"
        )

        # Backward minimum-energy update (uses the kappa2 variant), floored.
        xi_min_b = max(
            float(self.lambda_ * xi_min_b + (eb3_2 * gamma_n_2) * eb3_2),
            self.xi_floor,
        )

        # Order-downdate back to the length-(M+1) gain vector, then update
        # the backward predictor with the kappa1 variant.
        phi_hat_n = phi_hat_np1[:-1] + phi_hat_np1[-1] * w_b

        w_b += phi_hat_n * (eb3_1 * gamma_n_2)

        # New conversion factor gamma(k) for the joint-process section.
        gamma_n_3 = self._safe_inv(
            1.0 + float(np.dot(phi_hat_n, r[:-1])),
            self.denom_floor,
            clamp_counter,
            "inv_g_n3",
        )

        # --- Joint-process estimation: a priori output/error, a posteriori
        # error via gamma, and coefficient update. ---
        y_k: float = float(np.dot(self.w, r[:-1]))
        outputs[k] = y_k
        e_k: float = float(d[k] - y_k)
        errors[k] = e_k
        e_post_k: float = float(e_k * gamma_n_3)
        errors_post[k] = e_post_k

        self.w += phi_hat_n * e_post_k
        self._record_history()

        if return_internal_states and xi_f_track is not None:
            xi_f_track[k], xi_b_track[k], gamma_track[k] = xi_min_f, xi_min_b, gamma_n_3

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[StabFastRLS] Completed in {runtime_s * 1000:.02f} ms")

    # clamp_stats is always reported so users can audit numerical clamping.
    extra: Dict[str, Any] = {"errors_posteriori": errors_post, "clamp_stats": clamp_counter}
    if return_internal_states:
        extra.update({"xi_min_f": xi_f_track, "xi_min_b": xi_b_track, "gamma": gamma_track})

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the stabilized FT-RLS adaptation loop (real-valued).
Parameters
input_signal : array_like of float
Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
Real-valued desired/reference sequence d[k] with shape (N,).
Must have the same length as input_signal.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes internal trajectories in result.extra:
- "xi_min_f": ndarray of float, shape (N,) (forward minimum
prediction-error energy).
- "xi_min_b": ndarray of float, shape (N,) (backward minimum
prediction-error energy).
- "gamma": ndarray of float, shape (N,) (conversion factor).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
A priori output sequence y[k].
- errors : ndarray of float, shape (N,)
A priori error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict
Always includes:
- "errors_posteriori": ndarray of float, shape (N,) with
\( e_{\text{post}}(k) \).
- "clamp_stats": dict with counters of denominator clamps.
Additionally includes "xi_min_f", "xi_min_b", and
"gamma" if return_internal_states=True.
class QRRLS(AdaptiveFilter):
    """
    QR-RLS adaptive filter using Givens rotations (real-valued).

    QR-decomposition RLS implementation based on Diniz (Alg. 9.1, 3rd ed.),
    following the reference MATLAB routine ``QR_RLS.m``. This variant maintains
    internal state variables closely matching the MATLAB code and applies
    sequential real Givens rotations to a stacked system.

    Parameters
    ----------
    filter_order : int
        Adaptive FIR filter order ``M``. The number of coefficients is ``M+1``.
    lamb : float, optional
        Forgetting factor ``lambda`` with ``0 < lambda <= 1``. Default is 0.99.
    w_init : array_like of float, optional
        Initial coefficient vector ``w(0)`` with shape ``(M+1,)``. If None,
        initializes with zeros.
    denom_floor : float, optional
        Small positive floor used to avoid division by (near) zero in scalar
        denominators. Default is 1e-18.

    Notes
    -----
    Real-valued only
        This implementation is restricted to real-valued signals and coefficients
        (``supports_complex=False``). The constraint is enforced via
        ``@ensure_real_signals`` on :meth:`optimize`.

    State variables (MATLAB naming)
        This implementation keeps the same key state variables as ``QR_RLS.m``:

        - ``ULineMatrix`` : ndarray, shape ``(M+1, M+1)``
          Upper-triangular-like matrix updated by sequential Givens rotations.
        - ``dLine_q2`` : ndarray, shape ``(M+1,)``
          Transformed desired vector accumulated through the same rotations.
        - ``gamma`` : float
          Scalar accumulated as the product of Givens cosines in each iteration.

    Givens-rotation structure (high level)
        At each iteration, the algorithm applies Givens rotations to eliminate
        components of the stacked vector ``[regressor; ULineMatrix]`` while
        applying the same rotations to ``[d_line; dLine_q2]``. The resulting
        system is then solved by back-substitution to obtain the updated weights.

    Output/error conventions (MATLAB-style)
        The returned ``errors`` correspond to the MATLAB ``errorVector``:

        .. math::
            e[k] = d_{line}[k] \\cdot \\gamma[k],

        and the reported output is computed as:

        .. math::
            y[k] = d[k] - e[k].

        Since this error is formed after the rotation steps (i.e., after the
        QR-update stage), the method sets ``error_type="a_posteriori"``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 3rd ed., Algorithm 9.1 (QR-RLS).
    """

    # Real-valued algorithm; complex signals are rejected by the decorator.
    supports_complex: bool = False

    lamb: float              # forgetting factor lambda, 0 < lambda <= 1
    n_coeffs: int            # number of coefficients, filter_order + 1
    ULineMatrix: np.ndarray  # (M+1, M+1) rotation-updated system matrix
    dLine_q2: np.ndarray     # (M+1,) rotated desired-signal accumulator
    _tiny: float             # denominator floor to avoid division by ~0

    def __init__(
        self,
        filter_order: int,
        lamb: float = 0.99,
        w_init: Optional[ArrayLike] = None,
        *,
        denom_floor: float = 1e-18,
    ) -> None:
        super().__init__(filter_order=int(filter_order), w_init=w_init)

        self.lamb = float(lamb)
        # Chained comparison also rejects NaN (NaN fails 0 < lamb <= 1).
        if not (0.0 < self.lamb <= 1.0):
            raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got {self.lamb}.")

        self._tiny = float(denom_floor)

        self.n_coeffs = int(self.filter_order) + 1

        # Force real float64 coefficients regardless of base-class dtype.
        self.w = np.asarray(self.w, dtype=np.float64)

        if w_init is not None:
            w0 = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w0.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have length {self.n_coeffs}, got {w0.size}."
                )
            self.w = w0.copy()

        # Rotation state (see class docstring, MATLAB naming).
        self.ULineMatrix = np.zeros((self.n_coeffs, self.n_coeffs), dtype=np.float64)
        self.dLine_q2 = np.zeros(self.n_coeffs, dtype=np.float64)

        self.w_history = []
        self._record_history()

    @staticmethod
    def _givens_rotate_rows(
        row0: np.ndarray,
        row1: np.ndarray,
        cos_t: float,
        sin_t: float,
    ) -> tuple[np.ndarray, np.ndarray]:
        """
        Applies a real 2x2 Givens rotation to a pair of stacked rows.

        The rotation is:

        .. math::
            \\begin{bmatrix}
            \\cos\\theta & -\\sin\\theta \\\\
            \\sin\\theta & \\cos\\theta
            \\end{bmatrix}
            \\begin{bmatrix}
            \\mathrm{row0} \\\\
            \\mathrm{row1}
            \\end{bmatrix}
            =
            \\begin{bmatrix}
            \\mathrm{row0}' \\\\
            \\mathrm{row1}'
            \\end{bmatrix}.

        Parameters
        ----------
        row0, row1 : ndarray of float
            1-D arrays with the same length (representing two rows to be rotated).
        cos_t, sin_t : float
            Givens rotation cosine and sine.

        Returns
        -------
        (row0_rot, row1_rot) : tuple of ndarray
            Rotated rows.
        """
        # Returns new arrays; the inputs are not modified in place.
        new0 = cos_t * row0 - sin_t * row1
        new1 = sin_t * row0 + cos_t * row1
        return new0, new1

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the QR-RLS adaptation loop (MATLAB-style recursion).

        Parameters
        ----------
        input_signal : array_like of float
            Real-valued input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of float
            Real-valued desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes the last internal states in ``result.extra``:
            ``"ULineMatrix_last"``, ``"dLine_q2_last"``, ``"gamma_last"``,
            and ``"d_line_last"``.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
                Scalar output sequence as computed by the MATLAB-style routine:
                ``y[k] = d[k] - e[k]``.
            - errors : ndarray of float, shape ``(N,)``
                MATLAB-style a posteriori error quantity:
                ``e[k] = d_line[k] * gamma[k]``.
            - coefficients : ndarray of float
                Coefficient history recorded by the base class.
            - error_type : str
                Set to ``"a_posteriori"``.
            - extra : dict, optional
                Present only if ``return_internal_states=True`` with:
                - ``ULineMatrix_last`` : ndarray
                    Final ``ULineMatrix``.
                - ``dLine_q2_last`` : ndarray
                    Final ``dLine_q2``.
                - ``gamma_last`` : float
                    ``gamma`` at the last iteration.
                - ``d_line_last`` : float
                    ``d_line`` at the last iteration.
                - ``forgetting_factor`` : float
                    The forgetting factor ``lambda`` used.
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        n_samples = int(d.size)
        n = int(self.n_coeffs)
        M = int(self.filter_order)

        # The initialization stage consumes the first n samples.
        if n_samples < n:
            raise ValueError(
                f"QR-RLS needs at least (filter_order+1) samples. "
                f"Got n_samples={n_samples}, filter_order={M} => n_coeffs={n}."
            )

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        # Re-running optimize resets the rotation state and history.
        self.ULineMatrix.fill(0.0)
        self.dLine_q2.fill(0.0)

        self.w_history = []
        self._record_history()

        # x[0] is a recurring scalar divisor in the initialization stage;
        # floor its magnitude once, preserving sign.
        denom0 = float(x[0])
        if abs(denom0) < self._tiny:
            denom0 = self._tiny if denom0 >= 0.0 else -self._tiny

        # --- Initialization stage (mirrors QR_RLS.m): weights from the
        # first n samples via forward substitution. ---
        for kt in range(n):
            w_tmp = np.zeros(n, dtype=np.float64)
            w_tmp[0] = float(d[0] / denom0)

            for ct in range(1, kt + 1):
                num = -float(np.dot(x[1 : ct + 1], w_tmp[ct - 1 :: -1])) + float(d[ct])
                w_tmp[ct] = float(num / denom0)

            self.w = w_tmp
            self._record_history()

            # Output for sample kt from the (possibly truncated) regressor.
            xk = np.zeros(n, dtype=np.float64)
            start = max(0, kt - M)
            seg = x[start : kt + 1][::-1]
            xk[: seg.size] = seg
            outputs[kt] = float(np.dot(w_tmp, xk))

        sqrt_lam = float(np.sqrt(self.lamb))

        # Seed ULineMatrix / dLine_q2 with lambda-weighted rows built from
        # the initial data (weights decay with row index).
        for it in range(M + 1):
            scale = float(self.lamb ** ((it + 1) / 2.0))

            vec = x[(n - it - 1) :: -1]
            self.ULineMatrix[it, 0 : (n - it)] = scale * vec

            self.dLine_q2[it] = scale * float(d[n - it - 1])

        gamma_last: float = 1.0
        d_line_last: float = float(d[n - 1])

        # --- Main QR-RLS loop over the remaining samples. ---
        for k in range(n, n_samples):
            gamma = 1.0
            d_line = float(d[k])

            # Regressor [x(k), x(k-1), ..., x(k-M)].
            reg = x[k : k - M - 1 : -1].copy()

            # Sequential Givens rotations annihilating the regressor
            # against ULineMatrix; the same rotations act on the pair
            # (d_line, dLine_q2).
            for rt in range(M + 1):
                row_u = rt
                col_u = n - 1 - rt
                idx_r = n - 1 - rt

                u_val = float(self.ULineMatrix[row_u, col_u])
                r_val = float(reg[idx_r])

                cI = float(np.sqrt(u_val * u_val + r_val * r_val))
                # Degenerate pivot: fall back to the identity rotation.
                if cI < self._tiny:
                    cos_t, sin_t = 1.0, 0.0
                else:
                    cos_t, sin_t = (u_val / cI), (r_val / cI)

                reg, self.ULineMatrix[row_u, :] = self._givens_rotate_rows(
                    reg, self.ULineMatrix[row_u, :], cos_t, sin_t
                )

                # gamma accumulates the product of the Givens cosines.
                gamma *= cos_t

                dq2_rt = float(self.dLine_q2[row_u])
                new_d_line = (cos_t * d_line) - (sin_t * dq2_rt)
                new_dq2_rt = (sin_t * d_line) + (cos_t * dq2_rt)
                d_line = float(new_d_line)
                self.dLine_q2[row_u] = float(new_dq2_rt)

            # Stack [d_line; dLine_q2] for back-substitution.
            d_bar = np.empty(n + 1, dtype=np.float64)
            d_bar[0] = d_line
            d_bar[1:] = self.dLine_q2

            # Back-substitution for the updated weights (all scalar
            # divisors are floored, preserving sign).
            w_new = np.zeros(n, dtype=np.float64)

            den = float(self.ULineMatrix[n - 1, 0])
            if abs(den) < self._tiny:
                den = self._tiny if den >= 0.0 else -self._tiny
            w_new[0] = float(d_bar[n] / den)

            for it in range(1, M + 1):
                row = n - 1 - it
                u_vec = self.ULineMatrix[row, 0:it][::-1]
                w_vec = w_new[0:it][::-1]
                num = -float(np.dot(u_vec, w_vec)) + float(d_bar[n - it])

                den = float(self.ULineMatrix[row, it])
                if abs(den) < self._tiny:
                    den = self._tiny if den >= 0.0 else -self._tiny

                w_new[it] = float(num / den)

            self.w = w_new
            self._record_history()

            # Apply forgetting before the next iteration.
            self.dLine_q2 *= sqrt_lam
            self.ULineMatrix *= sqrt_lam

            # MATLAB-style error/output (see class docstring).
            errors[k] = float(d_line * gamma)
            outputs[k] = float(d[k] - errors[k])

            gamma_last = float(gamma)
            d_line_last = float(d_line)

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[QRRLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "ULineMatrix_last": self.ULineMatrix.copy(),
                "dLine_q2_last": self.dLine_q2.copy(),
                "gamma_last": gamma_last,
                "d_line_last": d_line_last,
                "forgetting_factor": float(self.lamb),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
QR-RLS adaptive filter using Givens rotations (real-valued).
QR-decomposition RLS implementation based on Diniz (Alg. 9.1, 3rd ed.),
following the reference MATLAB routine QR_RLS.m. This variant maintains
internal state variables closely matching the MATLAB code and applies
sequential real Givens rotations to a stacked system.
Parameters
filter_order : int
Adaptive FIR filter order M. The number of coefficients is M+1.
lamb : float, optional
Forgetting factor lambda with 0 < lambda <= 1. Default is 0.99.
w_init : array_like of float, optional
Initial coefficient vector w(0) with shape (M+1,). If None,
initializes with zeros.
denom_floor : float, optional
Small positive floor used to avoid division by (near) zero in scalar
denominators. Default is 1e-18.
Notes
Real-valued only
This implementation is restricted to real-valued signals and coefficients
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
State variables (MATLAB naming)
This implementation keeps the same key state variables as QR_RLS.m:
- ``ULineMatrix`` : ndarray, shape ``(M+1, M+1)``
Upper-triangular-like matrix updated by sequential Givens rotations.
- ``dLine_q2`` : ndarray, shape ``(M+1,)``
Transformed desired vector accumulated through the same rotations.
- ``gamma`` : float
Scalar accumulated as the product of Givens cosines in each iteration.
Givens-rotation structure (high level)
At each iteration, the algorithm applies Givens rotations to eliminate
components of the stacked vector [regressor; ULineMatrix] while
applying the same rotations to [d_line; dLine_q2]. The resulting
system is then solved by back-substitution to obtain the updated weights.
Output/error conventions (MATLAB-style)
The returned errors correspond to the MATLAB errorVector:
$$e[k] = d_{line}[k] \cdot \gamma[k],$$
and the reported output is computed as:
$$y[k] = d[k] - e[k].$$
Since this error is formed after the rotation steps (i.e., after the
QR-update stage), the method sets ``error_type="a_posteriori"``.
References
P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 3rd ed., Algorithm 9.1 (QR-RLS).
def __init__(
    self,
    filter_order: int,
    lamb: float = 0.99,
    w_init: Optional[ArrayLike] = None,
    *,
    denom_floor: float = 1e-18,
) -> None:
    """Builds the QR-RLS state: forgetting factor, coefficients, and rotation matrices."""
    super().__init__(filter_order=int(filter_order), w_init=w_init)

    # Forgetting factor must lie in (0, 1]; the chained comparison also
    # rejects NaN.
    self.lamb = float(lamb)
    if not (0.0 < self.lamb <= 1.0):
        raise ValueError(f"lamb must satisfy 0 < lamb <= 1. Got {self.lamb}.")

    self._tiny = float(denom_floor)
    self.n_coeffs = int(self.filter_order) + 1

    # Coefficients are kept as real float64, optionally overridden by w_init.
    self.w = np.asarray(self.w, dtype=np.float64)
    if w_init is not None:
        init_vec = np.asarray(w_init, dtype=np.float64).reshape(-1)
        if init_vec.size != self.n_coeffs:
            raise ValueError(
                f"w_init must have length {self.n_coeffs}, got {init_vec.size}."
            )
        self.w = init_vec.copy()

    # Rotation state (MATLAB naming): triangularized system matrix and the
    # rotated desired-signal accumulator.
    state_shape = (self.n_coeffs, self.n_coeffs)
    self.ULineMatrix = np.zeros(state_shape, dtype=np.float64)
    self.dLine_q2 = np.zeros(self.n_coeffs, dtype=np.float64)

    self.w_history = []
    self._record_history()
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the QR-RLS adaptation loop (MATLAB-style recursion).

    Parameters
    ----------
    input_signal : array_like of float
        Real-valued input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
    desired_signal : array_like of float
        Real-valued desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes the last internal states in ``result.extra``:
        ``"ULineMatrix_last"``, ``"dLine_q2_last"``, ``"gamma_last"``,
        and ``"d_line_last"``.

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of float, shape ``(N,)``
            Scalar output sequence as computed by the MATLAB-style routine:
            ``y[k] = d[k] - e[k]``.
        - errors : ndarray of float, shape ``(N,)``
            MATLAB-style a posteriori error quantity:
            ``e[k] = d_line[k] * gamma[k]``.
        - coefficients : ndarray of float
            Coefficient history recorded by the base class.
        - error_type : str
            Set to ``"a_posteriori"``.
        - extra : dict, optional
            Present only if ``return_internal_states=True`` with:
            - ``ULineMatrix_last`` : ndarray
                Final ``ULineMatrix``.
            - ``dLine_q2_last`` : ndarray
                Final ``dLine_q2``.
            - ``gamma_last`` : float
                ``gamma`` at the last iteration.
            - ``d_line_last`` : float
                ``d_line`` at the last iteration.
            - ``forgetting_factor`` : float
                The forgetting factor ``lambda`` used.
    """
    t0 = perf_counter()

    x = np.asarray(input_signal, dtype=np.float64).ravel()
    d = np.asarray(desired_signal, dtype=np.float64).ravel()

    n_samples = int(d.size)
    n = int(self.n_coeffs)
    M = int(self.filter_order)

    # The initialization stage below consumes the first n samples.
    if n_samples < n:
        raise ValueError(
            f"QR-RLS needs at least (filter_order+1) samples. "
            f"Got n_samples={n_samples}, filter_order={M} => n_coeffs={n}."
        )

    outputs = np.zeros(n_samples, dtype=np.float64)
    errors = np.zeros(n_samples, dtype=np.float64)

    # Re-running optimize resets the rotation state and coefficient history.
    self.ULineMatrix.fill(0.0)
    self.dLine_q2.fill(0.0)

    self.w_history = []
    self._record_history()

    # x[0] is a recurring scalar divisor below; floor its magnitude once,
    # preserving its sign.
    denom0 = float(x[0])
    if abs(denom0) < self._tiny:
        denom0 = self._tiny if denom0 >= 0.0 else -self._tiny

    # --- Initialization stage (mirrors QR_RLS.m): weights from the first
    # n samples via forward substitution. ---
    for kt in range(n):
        w_tmp = np.zeros(n, dtype=np.float64)
        w_tmp[0] = float(d[0] / denom0)

        for ct in range(1, kt + 1):
            num = -float(np.dot(x[1 : ct + 1], w_tmp[ct - 1 :: -1])) + float(d[ct])
            w_tmp[ct] = float(num / denom0)

        self.w = w_tmp
        self._record_history()

        # Output for sample kt from the (possibly truncated) regressor.
        xk = np.zeros(n, dtype=np.float64)
        start = max(0, kt - M)
        seg = x[start : kt + 1][::-1]
        xk[: seg.size] = seg
        outputs[kt] = float(np.dot(w_tmp, xk))

    sqrt_lam = float(np.sqrt(self.lamb))

    # Seed ULineMatrix / dLine_q2 with lambda-weighted rows built from the
    # initial data (weights decay with the row index).
    for it in range(M + 1):
        scale = float(self.lamb ** ((it + 1) / 2.0))

        vec = x[(n - it - 1) :: -1]
        self.ULineMatrix[it, 0 : (n - it)] = scale * vec

        self.dLine_q2[it] = scale * float(d[n - it - 1])

    gamma_last: float = 1.0
    d_line_last: float = float(d[n - 1])

    # --- Main QR-RLS loop over the remaining samples. ---
    for k in range(n, n_samples):
        gamma = 1.0
        d_line = float(d[k])

        # Regressor [x(k), x(k-1), ..., x(k-M)].
        reg = x[k : k - M - 1 : -1].copy()

        # Sequential Givens rotations annihilating the regressor against
        # ULineMatrix; the same rotations act on (d_line, dLine_q2).
        for rt in range(M + 1):
            row_u = rt
            col_u = n - 1 - rt
            idx_r = n - 1 - rt

            u_val = float(self.ULineMatrix[row_u, col_u])
            r_val = float(reg[idx_r])

            cI = float(np.sqrt(u_val * u_val + r_val * r_val))
            # Degenerate pivot: fall back to the identity rotation.
            if cI < self._tiny:
                cos_t, sin_t = 1.0, 0.0
            else:
                cos_t, sin_t = (u_val / cI), (r_val / cI)

            reg, self.ULineMatrix[row_u, :] = self._givens_rotate_rows(
                reg, self.ULineMatrix[row_u, :], cos_t, sin_t
            )

            # gamma accumulates the product of the Givens cosines.
            gamma *= cos_t

            dq2_rt = float(self.dLine_q2[row_u])
            new_d_line = (cos_t * d_line) - (sin_t * dq2_rt)
            new_dq2_rt = (sin_t * d_line) + (cos_t * dq2_rt)
            d_line = float(new_d_line)
            self.dLine_q2[row_u] = float(new_dq2_rt)

        # Stack [d_line; dLine_q2] for the back-substitution step.
        d_bar = np.empty(n + 1, dtype=np.float64)
        d_bar[0] = d_line
        d_bar[1:] = self.dLine_q2

        # Back-substitution for the updated weights (scalar divisors are
        # floored, preserving sign).
        w_new = np.zeros(n, dtype=np.float64)

        den = float(self.ULineMatrix[n - 1, 0])
        if abs(den) < self._tiny:
            den = self._tiny if den >= 0.0 else -self._tiny
        w_new[0] = float(d_bar[n] / den)

        for it in range(1, M + 1):
            row = n - 1 - it
            u_vec = self.ULineMatrix[row, 0:it][::-1]
            w_vec = w_new[0:it][::-1]
            num = -float(np.dot(u_vec, w_vec)) + float(d_bar[n - it])

            den = float(self.ULineMatrix[row, it])
            if abs(den) < self._tiny:
                den = self._tiny if den >= 0.0 else -self._tiny

            w_new[it] = float(num / den)

        self.w = w_new
        self._record_history()

        # Apply forgetting before the next iteration.
        self.dLine_q2 *= sqrt_lam
        self.ULineMatrix *= sqrt_lam

        # MATLAB-style error/output conventions (see class docstring).
        errors[k] = float(d_line * gamma)
        outputs[k] = float(d[k] - errors[k])

        gamma_last = float(gamma)
        d_line_last = float(d_line)

    runtime_s = float(perf_counter() - t0)
    if verbose:
        print(f"[QRRLS] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "ULineMatrix_last": self.ULineMatrix.copy(),
            "dLine_q2_last": self.dLine_q2.copy(),
            "gamma_last": gamma_last,
            "d_line_last": d_line_last,
            "forgetting_factor": float(self.lamb),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_posteriori",
        extra=extra,
    )
Executes the QR-RLS adaptation loop (MATLAB-style recursion).
Parameters
input_signal : array_like of float
Real-valued input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
Real-valued desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal states in result.extra:
"ULineMatrix_last", "dLine_q2_last", "gamma_last",
and "d_line_last".
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar output sequence as computed by the MATLAB-style routine:
y[k] = d[k] - e[k].
- errors : ndarray of float, shape (N,)
MATLAB-style a posteriori error quantity:
e[k] = d_line[k] * gamma[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "a_posteriori".
- extra : dict, optional
Present only if return_internal_states=True with:
- ULineMatrix_last : ndarray
Final ULineMatrix.
- dLine_q2_last : ndarray
Final dLine_q2.
- gamma_last : float
gamma at the last iteration.
- d_line_last : float
d_line at the last iteration.
- forgetting_factor : float
The forgetting factor lambda used.
class ErrorEquation(AdaptiveFilter):
    """
    Equation-Error RLS for adaptive IIR filtering (real-valued).

    The equation-error approach avoids the non-convexity of direct IIR
    output-error minimization by adapting the coefficients using an auxiliary
    (linear-in-parameters) error in which past outputs in the feedback path are
    replaced by past desired samples. This yields a quadratic (RLS-suitable)
    criterion while still producing a "true IIR" output for evaluation.

    This implementation follows Diniz (3rd ed., Alg. 10.3) and is restricted to
    **real-valued** signals (enforced by ``ensure_real_signals``).

    Parameters
    ----------
    zeros_order : int
        Numerator order ``N`` (number of zeros). The feedforward part has
        ``N + 1`` coefficients.
    poles_order : int
        Denominator order ``M`` (number of poles). The feedback part has ``M``
        coefficients.
    forgetting_factor : float, optional
        Exponential forgetting factor ``lambda``. Default is 0.99.
    epsilon : float, optional
        Positive initialization for the inverse correlation matrix used by RLS.
        Internally, the inverse covariance is initialized as:

        .. math::
            S(0) = \\frac{1}{\\epsilon} I.

        Default is 1e-3.
    w_init : array_like of float, optional
        Optional initial coefficient vector with shape ``(M + N + 1,)``
        following the parameter order described in the Notes. If None, the
        coefficients are initialized with zeros.

    Raises
    ------
    ValueError
        If ``w_init`` is provided with a number of elements different from
        ``M + N + 1``.

    Notes
    -----
    Parameterization (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The coefficient vector is arranged as:

    - ``w[:M]``: feedback (pole) coefficients (denoted ``a`` in literature)
    - ``w[M:]``: feedforward (zero) coefficients (denoted ``b``)

    Regressors and two outputs
    ~~~~~~~~~~~~~~~~~~~~~~~~~~
    At time ``k``, define ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T``.
    The algorithm forms two regressors:

    - Output regressor (uses past *true outputs*):

      .. math::
          \\varphi_y(k) = [y(k-1), \\ldots, y(k-M),\\; x(k), \\ldots, x(k-N)]^T.

    - Equation regressor (uses past *desired samples*):

      .. math::
          \\varphi_e(k) = [d(k-1), \\ldots, d(k-M),\\; x(k), \\ldots, x(k-N)]^T.

    The reported output is the "true IIR" output computed with the output
    regressor:

    .. math::
        y(k) = w^T(k)\\, \\varphi_y(k),

    while the auxiliary "equation" output is:

    .. math::
        y_{eq}(k) = w^T(k)\\, \\varphi_e(k).

    The adaptation is driven by the *equation error*:

    .. math::
        e_{eq}(k) = d(k) - y_{eq}(k),

    whereas the "output error" used for evaluation is:

    .. math::
        e(k) = d(k) - y(k).

    Stability procedure
    ~~~~~~~~~~~~~~~~~~~
    After each update, the feedback coefficients ``w[:M]`` are stabilized by
    reflecting any poles outside the unit circle back inside (pole reflection).

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
           Implementation*, 3rd ed., Algorithm 10.3.
    """

    supports_complex: bool = False
    zeros_order: int
    poles_order: int
    forgetting_factor: float
    epsilon: float
    n_coeffs: int
    Sd: np.ndarray
    y_buffer: np.ndarray
    d_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        forgetting_factor: float = 0.99,
        epsilon: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.forgetting_factor = float(forgetting_factor)
        self.epsilon = float(epsilon)

        self.n_coeffs = int(self.poles_order + self.zeros_order + 1)

        # FIX: honor an explicitly provided w_init. The previous code
        # unconditionally reset self.w to zeros, silently discarding w_init.
        if w_init is None:
            self.w = np.zeros(self.n_coeffs, dtype=np.float64)
        else:
            w0 = np.asarray(w_init, dtype=np.float64).ravel()
            if w0.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have {self.n_coeffs} elements, got {w0.size}"
                )
            self.w = w0

        # Inverse correlation matrix: S(0) = (1/epsilon) * I.
        self.Sd = (1.0 / self.epsilon) * np.eye(self.n_coeffs, dtype=np.float64)

        # Delay lines with the last M true outputs and desired samples
        # (empty arrays when poles_order == 0).
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)
        self.d_buffer = np.zeros(self.poles_order, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Enforces IIR stability by reflecting poles outside the unit circle back inside.
        This ensures the recursive part of the filter does not diverge.
        """
        # Denominator polynomial is 1 - a_1 z^-1 - ... - a_M z^-M.
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0

        if np.any(mask):
            # Reflect unstable poles: p -> 1 / conj(p), keeping the angle.
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the equation-error RLS adaptation loop.

        Parameters
        ----------
        input_signal : array_like of float
            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of float
            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
            Must have the same length as ``input_signal``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes the time history of the feedback (pole)
            coefficients in ``result.extra["a_coefficients"]`` with shape
            ``(N, poles_order)`` (or None if ``poles_order == 0``).

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
                "True IIR" output sequence ``y[k]`` computed with past outputs.
            - errors : ndarray of float, shape ``(N,)``
                Output error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of float
                Coefficient history recorded by the base class.
            - error_type : str
                Set to ``"equation_error"``.
            - extra : dict
                Always includes:
                - ``"auxiliary_errors"``: ndarray of float, shape ``(N,)`` with
                  the equation error ``e_eq[k] = d[k] - y_eq[k]`` used to drive
                  the RLS update.
                Additionally includes ``"a_coefficients"`` if
                ``return_internal_states=True``.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors_aux: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        a_track: Optional[np.ndarray] = (
            np.zeros((n_samples, self.poles_order), dtype=np.float64)
            if (return_internal_states and self.poles_order > 0)
            else None
        )

        # Left-pad the input so reg_x can be sliced without boundary checks.
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # reg_x = [x(k), x(k-1), ..., x(k-N)]^T
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]

            # Output regressor (past true outputs) and equation regressor
            # (past desired samples).
            reg_y: np.ndarray = np.concatenate((self.y_buffer, reg_x))
            reg_e: np.ndarray = np.concatenate((self.d_buffer, reg_x))

            y_out: float = float(np.dot(self.w, reg_y))
            y_equation: float = float(np.dot(self.w, reg_e))

            outputs[k] = y_out
            errors[k] = float(d[k] - y_out)
            errors_aux[k] = float(d[k] - y_equation)

            # Standard RLS inverse-correlation update driven by reg_e.
            psi: np.ndarray = self.Sd @ reg_e
            den: float = float(self.forgetting_factor + reg_e.T @ psi)

            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)
            self.w += (self.Sd @ reg_e) * errors_aux[k]

            if self.poles_order > 0:
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])

            if return_internal_states and a_track is not None:
                a_track[k, :] = self.w[: self.poles_order]

            # FIX: only shift the delay lines when they are non-empty;
            # previously, with poles_order == 0, np.concatenate(([y_out], ...))
            # grew the empty buffers to length 1 and broke the regressor
            # length on the next sample.
            if self.poles_order > 0:
                self.y_buffer = np.concatenate(([y_out], self.y_buffer[:-1]))
                self.d_buffer = np.concatenate(([d[k]], self.d_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[ErrorEquation] Completed in {runtime_s * 1000:.02f} ms")

        extra: Dict[str, Any] = {"auxiliary_errors": errors_aux}
        if return_internal_states:
            extra["a_coefficients"] = a_track

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="equation_error",
            extra=extra,
        )
Equation-Error RLS for adaptive IIR filtering (real-valued).
The equation-error approach avoids the non-convexity of direct IIR output-error minimization by adapting the coefficients using an auxiliary (linear-in-parameters) error in which past outputs in the feedback path are replaced by past desired samples. This yields a quadratic (RLS-suitable) criterion while still producing a "true IIR" output for evaluation.
This implementation follows Diniz (3rd ed., Alg. 10.3) and is restricted to
real-valued signals (enforced by ensure_real_signals).
Parameters
zeros_order : int
Numerator order N (number of zeros). The feedforward part has
N + 1 coefficients.
poles_order : int
Denominator order M (number of poles). The feedback part has M
coefficients.
forgetting_factor : float, optional
Exponential forgetting factor lambda. Default is 0.99.
epsilon : float, optional
Positive initialization for the inverse correlation matrix used by RLS.
Internally, the inverse covariance is initialized as:
$$S(0) = \frac{1}{\epsilon} I.$$
Default is 1e-3.
w_init : array_like of float, optional
Optional initial coefficient vector. If provided, it should have shape
(M + N + 1,) following the parameter order described below. If None,
the implementation initializes with zeros (and ignores w_init).
Notes
Parameterization (as implemented)
~~~~~~~~~
The coefficient vector is arranged as:
- w[:M]: feedback (pole) coefficients (denoted a in the literature)
- w[M:]: feedforward (zero) coefficients (denoted b)
Regressors and two outputs
~~~~~~
At time k, define reg_x = [x(k), x(k-1), ..., x(k-N)]^T.
The algorithm forms two regressors:
Output regressor (uses past true outputs):
$$\varphi_y(k) = [y(k-1), \ldots, y(k-M),\; x(k), \ldots, x(k-N)]^T.$$
Equation regressor (uses past desired samples):
$$\varphi_e(k) = [d(k-1), \ldots, d(k-M),\; x(k), \ldots, x(k-N)]^T.$$
The reported output is the "true IIR" output computed with the output regressor:
$$y(k) = w^T(k)\, \varphi_y(k),$$
while the auxiliary "equation" output is:
$$y_{eq}(k) = w^T(k)\, \varphi_e(k).$$
The adaptation is driven by the equation error:
$$e_{eq}(k) = d(k) - y_{eq}(k),$$
whereas the "output error" used for evaluation is:
$$e(k) = d(k) - y(k).$$
Stability procedure
~~~~~~~
After each update, the feedback coefficients w[:M] are stabilized by
reflecting any poles outside the unit circle back inside (pole reflection).
References
128 def __init__( 129 self, 130 zeros_order: int, 131 poles_order: int, 132 forgetting_factor: float = 0.99, 133 epsilon: float = 1e-3, 134 w_init: Optional[Union[np.ndarray, list]] = None, 135 ) -> None: 136 super().__init__(filter_order=zeros_order + poles_order, w_init=w_init) 137 138 self.zeros_order = int(zeros_order) 139 self.poles_order = int(poles_order) 140 self.forgetting_factor = float(forgetting_factor) 141 self.epsilon = float(epsilon) 142 143 self.n_coeffs = int(self.poles_order + self.zeros_order + 1) 144 self.w = np.zeros(self.n_coeffs, dtype=np.float64) 145 146 self.Sd = (1.0 / self.epsilon) * np.eye(self.n_coeffs, dtype=np.float64) 147 148 self.y_buffer = np.zeros(self.poles_order, dtype=np.float64) 149 self.d_buffer = np.zeros(self.poles_order, dtype=np.float64)
166 @ensure_real_signals 167 @validate_input 168 def optimize( 169 self, 170 input_signal: np.ndarray, 171 desired_signal: np.ndarray, 172 verbose: bool = False, 173 return_internal_states: bool = False, 174 ) -> OptimizationResult: 175 """ 176 Executes the equation-error RLS adaptation loop. 177 178 Parameters 179 ---------- 180 input_signal : array_like of float 181 Real-valued input sequence ``x[k]`` with shape ``(N,)``. 182 desired_signal : array_like of float 183 Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``. 184 Must have the same length as ``input_signal``. 185 verbose : bool, optional 186 If True, prints the total runtime after completion. 187 return_internal_states : bool, optional 188 If True, includes the time history of the feedback (pole) 189 coefficients in ``result.extra["a_coefficients"]`` with shape 190 ``(N, poles_order)`` (or None if ``poles_order == 0``). 191 192 Returns 193 ------- 194 OptimizationResult 195 Result object with fields: 196 - outputs : ndarray of float, shape ``(N,)`` 197 "True IIR" output sequence ``y[k]`` computed with past outputs. 198 - errors : ndarray of float, shape ``(N,)`` 199 Output error sequence ``e[k] = d[k] - y[k]``. 200 - coefficients : ndarray of float 201 Coefficient history recorded by the base class. 202 - error_type : str 203 Set to ``"equation_error"``. 204 - extra : dict 205 Always includes: 206 - ``"auxiliary_errors"``: ndarray of float, shape ``(N,)`` with 207 the equation error ``e_eq[k] = d[k] - y_eq[k]`` used to drive 208 the RLS update. 209 Additionally includes ``"a_coefficients"`` if 210 ``return_internal_states=True``. 
211 """ 212 tic: float = time() 213 214 x: np.ndarray = np.asarray(input_signal, dtype=np.float64) 215 d: np.ndarray = np.asarray(desired_signal, dtype=np.float64) 216 n_samples: int = int(x.size) 217 218 outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64) 219 errors: np.ndarray = np.zeros(n_samples, dtype=np.float64) 220 errors_aux: np.ndarray = np.zeros(n_samples, dtype=np.float64) 221 222 a_track: Optional[np.ndarray] = ( 223 np.zeros((n_samples, self.poles_order), dtype=np.float64) 224 if (return_internal_states and self.poles_order > 0) 225 else None 226 ) 227 228 x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64) 229 x_padded[self.zeros_order:] = x 230 231 232 233 for k in range(n_samples): 234 reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1] 235 236 reg_y: np.ndarray = np.concatenate((self.y_buffer, reg_x)) 237 238 reg_e: np.ndarray = np.concatenate((self.d_buffer, reg_x)) 239 240 y_out: float = float(np.dot(self.w, reg_y)) 241 y_equation: float = float(np.dot(self.w, reg_e)) 242 243 outputs[k] = y_out 244 errors[k] = float(d[k] - y_out) 245 errors_aux[k] = float(d[k] - y_equation) 246 247 psi: np.ndarray = self.Sd @ reg_e 248 den: float = float(self.forgetting_factor + reg_e.T @ psi) 249 250 self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den) 251 self.w += (self.Sd @ reg_e) * errors_aux[k] 252 253 if self.poles_order > 0: 254 self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order]) 255 256 if return_internal_states and a_track is not None: 257 a_track[k, :] = self.w[: self.poles_order] 258 259 self.y_buffer = np.concatenate(([y_out], self.y_buffer[:-1])) 260 self.d_buffer = np.concatenate(([d[k]], self.d_buffer[:-1])) 261 262 self._record_history() 263 264 runtime_s: float = float(time() - tic) 265 if verbose: 266 print(f"[ErrorEquation] Completed in {runtime_s * 1000:.02f} ms") 267 268 extra: Dict[str, Any] = {"auxiliary_errors": errors_aux} 269 if 
return_internal_states: 270 extra["a_coefficients"] = a_track 271 272 return self._pack_results( 273 outputs=outputs, 274 errors=errors, 275 runtime_s=runtime_s, 276 error_type="equation_error", 277 extra=extra, 278 )
Executes the equation-error RLS adaptation loop.
Parameters
input_signal : array_like of float
Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
Real-valued desired/reference sequence d[k] with shape (N,).
Must have the same length as input_signal.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the time history of the feedback (pole)
coefficients in result.extra["a_coefficients"] with shape
(N, poles_order) (or None if poles_order == 0).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
"True IIR" output sequence y[k] computed with past outputs.
- errors : ndarray of float, shape (N,)
Output error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "equation_error".
- extra : dict
Always includes:
- "auxiliary_errors": ndarray of float, shape (N,) with
the equation error e_eq[k] = d[k] - y_eq[k] used to drive
the RLS update.
Additionally includes "a_coefficients" if
return_internal_states=True.
class GaussNewton(AdaptiveFilter):
    """
    Gauss-Newton (recursive) output-error adaptation for IIR filters (real-valued).

    This method targets the output-error (OE) criterion for IIR adaptive filtering,
    i.e., it adapts coefficients to reduce the squared error
    :math:`e(k) = d(k) - y(k)` where :math:`y(k)` is produced by the *recursive*
    (IIR) structure.

    The Gauss-Newton idea is to approximate the Hessian of the OE cost by an
    outer-product model based on a sensitivity (Jacobian-like) vector
    :math:`\\phi(k)`. In this implementation, the associated inverse matrix
    (named ``Sd``) is updated recursively in an RLS-like fashion with an
    exponential smoothing factor ``alpha``. This yields faster convergence than
    plain gradient descent at the cost of roughly :math:`O((M+N)^2)` operations
    per sample.

    This is a modified version of Diniz (3rd ed., Alg. 10.1). The implementation
    is restricted to **real-valued** signals (enforced by ``ensure_real_signals``).

    Parameters
    ----------
    zeros_order : int
        Numerator order ``N`` (number of zeros). The feedforward part has
        ``N + 1`` coefficients.
    poles_order : int
        Denominator order ``M`` (number of poles). The feedback part has ``M``
        coefficients.
    alpha : float, optional
        Smoothing factor used in the recursive update of the inverse Hessian-like
        matrix ``Sd``. Must satisfy ``0 < alpha < 1``. Smaller values yield
        slower adaptation of ``Sd`` (more memory). Default is 0.05.
    step_size : float, optional
        Step size applied to the Gauss-Newton direction. Default is 1.0.
    delta : float, optional
        Positive regularization parameter for initializing ``Sd`` as
        :math:`S(0) = \\delta^{-1} I`. Default is 1e-3.
    w_init : array_like of float, optional
        Optional initial coefficient vector with shape ``(M + N + 1,)``
        following the parameter order described in the Notes. If None, the
        coefficients are initialized with zeros.

    Raises
    ------
    ValueError
        If ``w_init`` is provided with a number of elements different from
        ``M + N + 1``.

    Notes
    -----
    Parameterization (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The coefficient vector is arranged as:

    - ``w[:M]``: feedback (pole) coefficients (often denoted ``a``)
    - ``w[M:]``: feedforward (zero) coefficients (often denoted ``b``)

    Regressor and OE error (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    With ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T`` and an internal buffer of the
    last ``M`` outputs, the code forms:

    .. math::
        \\varphi(k) = [y(k-1), \\ldots, y(k-M),\\; x(k), \\ldots, x(k-N)]^T,

    computes:

    .. math::
        y(k) = w^T(k)\\, \\varphi(k), \\qquad e(k) = d(k) - y(k),

    and uses ``e(k)`` as the output-error signal reported in ``errors``.

    Sensitivity vector and Gauss-Newton recursion
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The update direction is built from filtered sensitivity signals stored in
    internal buffers (``x_line_buffer`` and ``y_line_buffer``). The code forms:

    .. math::
        \\phi(k) =
        [\\underline{y}(k-1), \\ldots, \\underline{y}(k-M),\\;
        -\\underline{x}(k), \\ldots, -\\underline{x}(k-N)]^T.

    Given ``psi = Sd * phi`` and the scalar denominator

    .. math::
        \\text{den}(k) = \\frac{1-\\alpha}{\\alpha} + \\phi^T(k)\\, Sd(k-1)\\, \\phi(k),

    the inverse Hessian-like matrix is updated as:

    .. math::
        Sd(k) = \\frac{1}{1-\\alpha}\\left(Sd(k-1) - \\frac{\\psi(k)\\psi^T(k)}{\\text{den}(k)}\\right),

    and the coefficient update is:

    .. math::
        w(k+1) = w(k) - \\mu\\, Sd(k)\\, \\phi(k)\\, e(k).

    Stability procedure
    ~~~~~~~~~~~~~~~~~~~
    After each update, the feedback coefficients ``w[:M]`` are stabilized by
    reflecting poles outside the unit circle back inside (pole reflection).

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
           Implementation*, 3rd ed., Algorithm 10.1 (modified).
    """

    supports_complex: bool = False
    zeros_order: int
    poles_order: int
    alpha: float
    step_size: float
    delta: float
    n_coeffs: int
    Sd: np.ndarray
    y_buffer: np.ndarray
    x_line_buffer: np.ndarray
    y_line_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        alpha: float = 0.05,
        step_size: float = 1.0,
        delta: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.alpha = float(alpha)
        self.step_size = float(step_size)
        self.delta = float(delta)

        self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)

        # FIX: honor an explicitly provided w_init. The previous code
        # unconditionally reset self.w to zeros, silently discarding w_init.
        if w_init is None:
            self.w = np.zeros(self.n_coeffs, dtype=np.float64)
        else:
            w0 = np.asarray(w_init, dtype=np.float64).ravel()
            if w0.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have {self.n_coeffs} elements, got {w0.size}"
                )
            self.w = w0

        # Inverse Hessian-like matrix: S(0) = (1/delta) * I.
        self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)

        # Delay line with the last M true outputs (empty when poles_order == 0).
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

        # Sensitivity buffers sized to serve both the pole and zero parts.
        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Reflects poles outside the unit circle back inside to maintain stability.
        """
        # Denominator polynomial is 1 - a_1 z^-1 - ... - a_M z^-M.
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0
        if np.any(mask):
            # Reflect unstable poles: p -> 1 / conj(p), keeping the angle.
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the (recursive) Gauss-Newton OE adaptation loop.

        Parameters
        ----------
        input_signal : array_like of float
            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of float
            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
            Must have the same length as ``input_signal``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes sensitivity trajectories in ``result.extra``:
            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
              scalar sensitivity signal :math:`\\underline{x}(k)`.
            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
              scalar sensitivity signal :math:`\\underline{y}(k)`.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
                Output sequence ``y[k]`` produced by the current IIR structure.
            - errors : ndarray of float, shape ``(N,)``
                Output error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of float
                Coefficient history recorded by the base class.
            - error_type : str
                Set to ``"output_error"``.
            - extra : dict
                Empty unless ``return_internal_states=True``.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Left-pad the input so reg_x can be sliced without boundary checks.
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        inv_alpha: float = float(1.0 - self.alpha)
        alpha_ratio: float = float(inv_alpha / self.alpha)

        for k in range(n_samples):
            # reg_x = [x(k), x(k-1), ..., x(k-N)]^T
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

            y_k: float = float(np.dot(self.w, regressor))
            outputs[k] = y_k
            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            # Filtered sensitivity signals (recursions driven by the current
            # pole estimates).
            a_coeffs: np.ndarray = self.w[: self.poles_order]
            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

            y_line_k: float = 0.0
            if self.poles_order > 0:
                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

            if return_internal_states and x_line_track is not None:
                x_line_track[k], y_line_track[k] = x_line_k, y_line_k

            # phi(k) = [y_line(k-1..k-M), -x_line(k..k-N)]^T
            phi: np.ndarray = np.concatenate(
                (
                    self.y_line_buffer[: self.poles_order],
                    -self.x_line_buffer[: self.zeros_order + 1],
                )
            )

            psi: np.ndarray = self.Sd @ phi
            den: float = float(alpha_ratio + phi.T @ psi)

            self.Sd = (1.0 / inv_alpha) * (self.Sd - np.outer(psi, psi) / den)
            self.w -= self.step_size * (self.Sd @ phi) * e_k

            # NOTE: the output delay line is only shifted when non-empty;
            # with poles_order == 0, np.concatenate(([y_k], ...)) would grow
            # the empty buffer to length 1 and break the regressor length.
            if self.poles_order > 0:
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[GaussNewton] Completed in {runtime_s * 1000:.02f} ms")

        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Gauss-Newton (recursive) output-error adaptation for IIR filters (real-valued).
This method targets the output-error (OE) criterion for IIR adaptive filtering, i.e., it adapts coefficients to reduce the squared error \( e(k) = d(k) - y(k) \) where \( y(k) \) is produced by the recursive (IIR) structure.
The Gauss-Newton idea is to approximate the Hessian of the OE cost by an
outer-product model based on a sensitivity (Jacobian-like) vector
\( \phi(k) \). In this implementation, the associated inverse matrix
(named Sd) is updated recursively in an RLS-like fashion with an
exponential smoothing factor alpha. This yields faster convergence than
plain gradient descent at the cost of roughly \( O((M+N)^2) \) operations
per sample.
This is a modified version of Diniz (3rd ed., Alg. 10.1). The implementation
is restricted to real-valued signals (enforced by ensure_real_signals).
Parameters
zeros_order : int
Numerator order N (number of zeros). The feedforward part has
N + 1 coefficients.
poles_order : int
Denominator order M (number of poles). The feedback part has M
coefficients.
alpha : float, optional
Smoothing factor used in the recursive update of the inverse Hessian-like
matrix Sd. Must satisfy 0 < alpha < 1. Smaller values yield
slower adaptation of Sd (more memory). Default is 0.05.
step_size : float, optional
Step size applied to the Gauss-Newton direction. Default is 1.0.
delta : float, optional
Positive regularization parameter for initializing Sd as
\( S(0) = \delta^{-1} I \). Default is 1e-3.
w_init : array_like of float, optional
Optional initial coefficient vector. If provided, it should have shape
(M + N + 1,) following the parameter order described below. If None,
the implementation initializes with zeros (and ignores w_init).
Notes
Parameterization (as implemented)
~~~~~~~~~
The coefficient vector is arranged as:
- w[:M]: feedback (pole) coefficients (often denoted a)
- w[M:]: feedforward (zero) coefficients (often denoted b)
Regressor and OE error (as implemented)
~~~~~~~~~~~
With reg_x = [x(k), x(k-1), ..., x(k-N)]^T and an internal buffer of the
last M outputs, the code forms:
$$\varphi(k) = [y(k-1), \ldots, y(k-M),\; x(k), \ldots, x(k-N)]^T,$$
computes:
$$y(k) = w^T(k)\, \varphi(k), \qquad e(k) = d(k) - y(k),$$
and uses e(k) as the output-error signal reported in errors.
Sensitivity vector and Gauss-Newton recursion
~~~~~~~~~
The update direction is built from filtered sensitivity signals stored in
internal buffers (x_line_buffer and y_line_buffer). The code forms:
$$\phi(k) = [\underline{y}(k-1), \ldots, \underline{y}(k-M),\; -\underline{x}(k), \ldots, -\underline{x}(k-N)]^T.$$
Given psi = Sd * phi and the scalar denominator
$$\text{den}(k) = \frac{1-\alpha}{\alpha} + \phi^T(k)\, Sd(k-1)\, \phi(k),$$
the inverse Hessian-like matrix is updated as:
$$Sd(k) = \frac{1}{1-\alpha}\left(Sd(k-1) - \frac{\psi(k)\psi^T(k)}{\text{den}(k)}\right),$$
and the coefficient update is:
$$w(k+1) = w(k) - \mu\, Sd(k)\, \phi(k)\, e(k).$$
Stability procedure
~~~~~~~
After each update, the feedback coefficients w[:M] are stabilized by
reflecting poles outside the unit circle back inside (pole reflection).
References
140 def __init__( 141 self, 142 zeros_order: int, 143 poles_order: int, 144 alpha: float = 0.05, 145 step_size: float = 1.0, 146 delta: float = 1e-3, 147 w_init: Optional[Union[np.ndarray, list]] = None, 148 ) -> None: 149 super().__init__(filter_order=zeros_order + poles_order, w_init=w_init) 150 151 self.zeros_order = int(zeros_order) 152 self.poles_order = int(poles_order) 153 self.alpha = float(alpha) 154 self.step_size = float(step_size) 155 self.delta = float(delta) 156 157 self.n_coeffs = int(self.zeros_order + 1 + self.poles_order) 158 self.w = np.zeros(self.n_coeffs, dtype=np.float64) 159 160 self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64) 161 162 self.y_buffer = np.zeros(self.poles_order, dtype=np.float64) 163 164 max_buffer: int = int(max(self.zeros_order + 1, self.poles_order)) 165 self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64) 166 self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the (recursive) Gauss-Newton OE adaptation loop.

    Parameters
    ----------
    input_signal : array_like of float
        Real-valued input sequence ``x[k]`` with shape ``(N,)``.
    desired_signal : array_like of float
        Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
        Must have the same length as ``input_signal``.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes sensitivity trajectories in ``result.extra``:
        - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
          scalar sensitivity signal :math:`\\underline{x}(k)`.
        - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
          scalar sensitivity signal :math:`\\underline{y}(k)`.

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of float, shape ``(N,)``
          Output sequence ``y[k]`` produced by the current IIR structure.
        - errors : ndarray of float, shape ``(N,)``
          Output error sequence ``e[k] = d[k] - y[k]``.
        - coefficients : ndarray of float
          Coefficient history recorded by the base class.
        - error_type : str
          Set to ``"output_error"``.
        - extra : dict
          Empty unless ``return_internal_states=True``.
    """
    tic: float = time()

    # Work on float64 views/copies of the input sequences.
    x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
    d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
    n_samples: int = int(x.size)

    outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

    # Optional per-sample traces of the scalar sensitivity signals.
    x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
    y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

    # Zero-pad the input so the first samples see an all-zero prehistory.
    x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
    x_padded[self.zeros_order:] = x

    # Constants of the alpha-weighted inverse-correlation recursion.
    inv_alpha: float = float(1.0 - self.alpha)
    alpha_ratio: float = float(inv_alpha / self.alpha)

    for k in range(n_samples):
        # Feedforward regressor [x(k), x(k-1), ..., x(k-N)].
        reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
        # Full OE regressor: M past outputs followed by current/past inputs.
        regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

        # Recursive (IIR) output and the output error it induces.
        y_k: float = float(np.dot(self.w, regressor))
        outputs[k] = y_k
        e_k: float = float(d[k] - y_k)
        errors[k] = e_k

        # Filtered sensitivity recursions driven by the pole coefficients.
        a_coeffs: np.ndarray = self.w[: self.poles_order]
        x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

        y_line_k: float = 0.0
        if self.poles_order > 0:
            prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
            y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

        # Shift the newest sensitivity samples into the buffers (newest first).
        self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
        self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

        if return_internal_states and x_line_track is not None:
            x_line_track[k], y_line_track[k] = x_line_k, y_line_k

        # Jacobian-like direction phi(k) built from the sensitivity buffers.
        phi: np.ndarray = np.concatenate(
            (
                self.y_line_buffer[: self.poles_order],
                -self.x_line_buffer[: self.zeros_order + 1],
            )
        )

        # Matrix-inversion-lemma update of the inverse correlation matrix Sd.
        psi: np.ndarray = self.Sd @ phi
        den: float = float(alpha_ratio + phi.T @ psi)

        self.Sd = (1.0 / inv_alpha) * (self.Sd - np.outer(psi, psi) / den)
        # Gauss-Newton coefficient step, scaled by Sd and the step size.
        self.w -= self.step_size * (self.Sd @ phi) * e_k

        if self.poles_order > 0:
            # Reflect any unstable poles back inside the unit circle.
            self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

        self._record_history()

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[GaussNewton] Completed in {runtime_s * 1000:.02f} ms")

    extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="output_error",
        extra=extra,
    )
Executes the (recursive) Gauss-Newton OE adaptation loop.
Parameters
input_signal : array_like of float
Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
Real-valued desired/reference sequence d[k] with shape (N,).
Must have the same length as input_signal.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes sensitivity trajectories in result.extra:
- "x_sensitivity": ndarray of float, shape (N,) with the
scalar sensitivity signal \( \underline{x}(k) \).
- "y_sensitivity": ndarray of float, shape (N,) with the
scalar sensitivity signal \( \underline{y}(k) \).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Output sequence y[k] produced by the current IIR structure.
- errors : ndarray of float, shape (N,)
Output error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "output_error".
- extra : dict
Empty unless return_internal_states=True.
class GaussNewtonGradient(AdaptiveFilter):
    """
    Gradient-based Gauss-Newton (output-error) adaptation for IIR filters (real-valued).

    This method targets the output-error (OE) criterion for IIR adaptive filtering,
    i.e., it adapts the coefficients to minimize the squared error
    :math:`e(k) = d(k) - y(k)` where :math:`y(k)` is produced by the *recursive*
    (IIR) structure.

    Compared to the classical Gauss-Newton approach, this implementation uses a
    simplified *gradient* update (no matrix inversions) while still leveraging
    filtered sensitivity signals to approximate how the output changes with
    respect to pole/zero coefficients.

    This is a modified version of Diniz (3rd ed., Alg. 10.1). The implementation
    is restricted to **real-valued** signals (enforced by ``ensure_real_signals``).

    Parameters
    ----------
    zeros_order : int
        Numerator order ``N`` (number of zeros). The feedforward part has
        ``N + 1`` coefficients.
    poles_order : int
        Denominator order ``M`` (number of poles). The feedback part has ``M``
        coefficients.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-3.
    w_init : array_like of float, optional
        Optional initial coefficient vector. If provided, it should have shape
        ``(M + N + 1,)`` following the parameter order described below. If None,
        the implementation initializes with zeros (and ignores ``w_init``).

    Notes
    -----
    Parameterization (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The coefficient vector is arranged as:

    - ``w[:M]``: feedback (pole) coefficients (often denoted ``a``)
    - ``w[M:]``: feedforward (zero) coefficients (often denoted ``b``)

    Regressor and output (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    With ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T`` and an internal buffer of the
    last ``M`` outputs, this implementation forms:

    .. math::
        \\varphi(k) = [y(k-1), \\ldots, y(k-M),\\; x(k), \\ldots, x(k-N)]^T,

    and computes the (recursive) output used by the OE criterion as:

    .. math::
        y(k) = w^T(k)\\, \\varphi(k), \\qquad e(k) = d(k) - y(k).

    Sensitivity-based gradient factor
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The update direction is built from filtered sensitivity signals stored in
    internal buffers (``x_line_buffer`` and ``y_line_buffer``). The code forms:

    .. math::
        \\phi(k) =
        [\\underline{y}(k-1), \\ldots, \\underline{y}(k-M),\\;
        -\\underline{x}(k), \\ldots, -\\underline{x}(k-N)]^T,

    and applies the per-sample gradient step:

    .. math::
        w(k+1) = w(k) - \\mu\\, \\phi(k)\\, e(k).

    Stability procedure
    ~~~~~~~~~~~~~~~~~~~
    After each update, the feedback coefficients ``w[:M]`` are stabilized by
    reflecting poles outside the unit circle back inside (pole reflection).

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 3rd ed., Algorithm 10.1 (modified).
    """

    # Real-valued algorithm only; complex inputs are rejected by the decorator.
    supports_complex: bool = False
    # Filter orders and hyper-parameters (assigned in __init__).
    zeros_order: int
    poles_order: int
    step_size: float
    n_coeffs: int
    # Internal state: past outputs and filtered sensitivity buffers.
    y_buffer: np.ndarray
    x_line_buffer: np.ndarray
    y_line_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        step_size: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """Set up filter orders, step size and internal state buffers."""
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.step_size = float(step_size)

        # M feedback + (N + 1) feedforward coefficients.
        # NOTE(review): this zero-fill discards any w_init already applied by
        # the base class — matches the docstring's "(and ignores w_init)".
        self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
        self.w = np.zeros(self.n_coeffs, dtype=np.float64)

        # Past outputs feeding the recursive (pole) part of the filter.
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

        # Sensitivity buffers sized for the longer of the two regressor halves.
        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Enforces IIR stability by reflecting poles outside the unit circle back inside.
        Essential for preventing divergence during the gradient descent update.
        """
        # Denominator polynomial 1 - a_1 z^{-1} - ... - a_M z^{-M}.
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0

        if np.any(mask):
            # Reflect unstable poles: p -> 1 / conj(p) (preserves phase).
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            # Convert the monic polynomial back to feedback coefficients.
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the gradient-based Gauss-Newton (OE) adaptation loop.

        Parameters
        ----------
        input_signal : array_like of float
            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of float
            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
            Must have the same length as ``input_signal``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes sensitivity trajectories in ``result.extra``:
            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
              scalar sensitivity signal :math:`\\underline{x}(k)` produced by
              the recursion in the code.
            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
              scalar sensitivity signal :math:`\\underline{y}(k)` produced by
              the recursion in the code.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
              Output sequence ``y[k]`` produced by the current IIR structure.
            - errors : ndarray of float, shape ``(N,)``
              Output error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of float
              Coefficient history recorded by the base class.
            - error_type : str
              Set to ``"output_error"``.
            - extra : dict
              Empty unless ``return_internal_states=True``.
        """
        tic: float = time()

        # Work on float64 views/copies of the input sequences.
        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        # Optional per-sample traces of the scalar sensitivity signals.
        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Zero-pad the input so the first samples see an all-zero prehistory.
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # Feedforward regressor [x(k), x(k-1), ..., x(k-N)].
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            # Full OE regressor: M past outputs followed by current/past inputs.
            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

            # Recursive (IIR) output and the output error it induces.
            y_k: float = float(np.dot(self.w, regressor))
            outputs[k] = y_k
            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            # Filtered sensitivity recursions driven by the pole coefficients.
            a_coeffs: np.ndarray = self.w[: self.poles_order]
            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

            y_line_k: float = 0.0
            if self.poles_order > 0:
                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

            # Shift the newest sensitivity samples into the buffers (newest first).
            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

            if return_internal_states and x_line_track is not None:
                x_line_track[k], y_line_track[k] = x_line_k, y_line_k

            # Gradient direction phi(k) built from the sensitivity buffers.
            phi: np.ndarray = np.concatenate(
                (
                    self.y_line_buffer[: self.poles_order],
                    -self.x_line_buffer[: self.zeros_order + 1],
                )
            )

            # Plain gradient step (no inverse-correlation matrix here).
            self.w -= self.step_size * phi * e_k

            if self.poles_order > 0:
                # Reflect any unstable poles back inside the unit circle.
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[GaussNewtonGradient] Completed in {runtime_s * 1000:.02f} ms")

        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Gradient-based Gauss-Newton (output-error) adaptation for IIR filters (real-valued).
This method targets the output-error (OE) criterion for IIR adaptive filtering, i.e., it adapts the coefficients to minimize the squared error \( e(k) = d(k) - y(k) \) where \( y(k) \) is produced by the recursive (IIR) structure.
Compared to the classical Gauss-Newton approach, this implementation uses a simplified gradient update (no matrix inversions) while still leveraging filtered sensitivity signals to approximate how the output changes with respect to pole/zero coefficients.
This is a modified version of Diniz (3rd ed., Alg. 10.1). The implementation
is restricted to real-valued signals (enforced by ensure_real_signals).
Parameters
zeros_order : int
Numerator order N (number of zeros). The feedforward part has
N + 1 coefficients.
poles_order : int
Denominator order M (number of poles). The feedback part has M
coefficients.
step_size : float, optional
Adaptation step size mu. Default is 1e-3.
w_init : array_like of float, optional
Optional initial coefficient vector. If provided, it should have shape
(M + N + 1,) following the parameter order described below. If None,
the implementation initializes with zeros (and ignores w_init).
Notes
Parameterization (as implemented)
~~~~~~~~~
The coefficient vector is arranged as:
- `w[:M]`: feedback (pole) coefficients (often denoted `a`)
- `w[M:]`: feedforward (zero) coefficients (often denoted `b`)
Regressor and output (as implemented)
~~~~~~~~~
With reg_x = [x(k), x(k-1), ..., x(k-N)]^T and an internal buffer of the
last M outputs, this implementation forms:
$$\varphi(k) = [y(k-1), \ldots, y(k-M),\; x(k), \ldots, x(k-N)]^T,$$
and computes the (recursive) output used by the OE criterion as:
$$y(k) = w^T(k)\, \varphi(k), \qquad e(k) = d(k) - y(k).$$
Sensitivity-based gradient factor
~~~~~~~~~
The update direction is built from filtered sensitivity signals stored in
internal buffers (x_line_buffer and y_line_buffer). The code forms:
$$\phi(k) = [\underline{y}(k-1), \ldots, \underline{y}(k-M),\; -\underline{x}(k), \ldots, -\underline{x}(k-N)]^T,$$
and applies the per-sample gradient step:
$$w(k+1) = w(k) - \mu\, \phi(k)\, e(k).$$
Stability procedure
~~~~~~~
After each update, the feedback coefficients w[:M] are stabilized by
reflecting poles outside the unit circle back inside (pole reflection).
References
def __init__(
    self,
    zeros_order: int,
    poles_order: int,
    step_size: float = 1e-3,
    w_init: Optional[Union[np.ndarray, list]] = None,
) -> None:
    """
    Set up filter orders, step size and internal state buffers.

    Parameters
    ----------
    zeros_order : int
        Numerator order ``N``; the feedforward part has ``N + 1`` coefficients.
    poles_order : int
        Denominator order ``M``; the feedback part has ``M`` coefficients.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-3.
    w_init : array_like of float, optional
        Initial coefficient vector with shape ``(M + N + 1,)``. If None,
        coefficients start at zero.

    Raises
    ------
    ValueError
        If ``w_init`` is provided with the wrong number of coefficients.
    """
    super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

    self.zeros_order = int(zeros_order)
    self.poles_order = int(poles_order)
    self.step_size = float(step_size)

    # M feedback + (N + 1) feedforward coefficients.
    self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
    # Bug fix: previously w was unconditionally zeroed, silently discarding a
    # user-supplied w_init. Honor w_init when given; keep zeros otherwise.
    if w_init is None:
        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
    else:
        w0 = np.asarray(w_init, dtype=np.float64).ravel()
        if w0.size != self.n_coeffs:
            raise ValueError(
                f"w_init must have {self.n_coeffs} coefficients, got {w0.size}"
            )
        self.w = w0.copy()

    # Past outputs feeding the recursive (pole) part of the filter.
    self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

    # Sensitivity buffers sized for the longer of the two regressor halves.
    max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
    self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
    self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the gradient-based Gauss-Newton (OE) adaptation loop.

    Parameters
    ----------
    input_signal : array_like of float
        Real-valued input sequence ``x[k]`` with shape ``(N,)``.
    desired_signal : array_like of float
        Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
        Must have the same length as ``input_signal``.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes sensitivity trajectories in ``result.extra``:
        - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
          scalar sensitivity signal :math:`\\underline{x}(k)` produced by
          the recursion in the code.
        - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
          scalar sensitivity signal :math:`\\underline{y}(k)` produced by
          the recursion in the code.

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of float, shape ``(N,)``
          Output sequence ``y[k]`` produced by the current IIR structure.
        - errors : ndarray of float, shape ``(N,)``
          Output error sequence ``e[k] = d[k] - y[k]``.
        - coefficients : ndarray of float
          Coefficient history recorded by the base class.
        - error_type : str
          Set to ``"output_error"``.
        - extra : dict
          Empty unless ``return_internal_states=True``.
    """
    tic: float = time()

    # Work on float64 views/copies of the input sequences.
    x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
    d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
    n_samples: int = int(x.size)

    outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

    # Optional per-sample traces of the scalar sensitivity signals.
    x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
    y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

    # Zero-pad the input so the first samples see an all-zero prehistory.
    x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
    x_padded[self.zeros_order:] = x

    for k in range(n_samples):
        # Feedforward regressor [x(k), x(k-1), ..., x(k-N)].
        reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
        # Full OE regressor: M past outputs followed by current/past inputs.
        regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

        # Recursive (IIR) output and the output error it induces.
        y_k: float = float(np.dot(self.w, regressor))
        outputs[k] = y_k
        e_k: float = float(d[k] - y_k)
        errors[k] = e_k

        # Filtered sensitivity recursions driven by the pole coefficients.
        a_coeffs: np.ndarray = self.w[: self.poles_order]
        x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

        y_line_k: float = 0.0
        if self.poles_order > 0:
            prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
            y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

        # Shift the newest sensitivity samples into the buffers (newest first).
        self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
        self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

        if return_internal_states and x_line_track is not None:
            x_line_track[k], y_line_track[k] = x_line_k, y_line_k

        # Gradient direction phi(k) built from the sensitivity buffers.
        phi: np.ndarray = np.concatenate(
            (
                self.y_line_buffer[: self.poles_order],
                -self.x_line_buffer[: self.zeros_order + 1],
            )
        )

        # Plain gradient step (no inverse-correlation matrix here).
        self.w -= self.step_size * phi * e_k

        if self.poles_order > 0:
            # Reflect any unstable poles back inside the unit circle.
            self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

        self._record_history()

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[GaussNewtonGradient] Completed in {runtime_s * 1000:.02f} ms")

    extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="output_error",
        extra=extra,
    )
Executes the gradient-based Gauss-Newton (OE) adaptation loop.
Parameters
input_signal : array_like of float
Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
Real-valued desired/reference sequence d[k] with shape (N,).
Must have the same length as input_signal.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes sensitivity trajectories in result.extra:
- "x_sensitivity": ndarray of float, shape (N,) with the
scalar sensitivity signal \( \underline{x}(k) \) produced by
the recursion in the code.
- "y_sensitivity": ndarray of float, shape (N,) with the
scalar sensitivity signal \( \underline{y}(k) \) produced by
the recursion in the code.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Output sequence y[k] produced by the current IIR structure.
- errors : ndarray of float, shape (N,)
Output error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "output_error".
- extra : dict
Empty unless return_internal_states=True.
class RLSIIR(AdaptiveFilter):
    """
    RLS-like output-error adaptation for IIR filters (real-valued).

    This algorithm applies an RLS-style recursion to the IIR output-error (OE)
    problem. Rather than minimizing a linear FIR error, it uses filtered
    sensitivity signals to build a Jacobian-like vector :math:`\\phi(k)` that
    approximates how the IIR output changes with respect to the pole/zero
    parameters. The inverse correlation matrix (named ``Sd``) scales the update,
    typically yielding faster convergence than plain gradient methods.

    The implementation corresponds to a modified form of Diniz (3rd ed.,
    Alg. 10.1) and is restricted to **real-valued** signals (enforced by
    ``ensure_real_signals``).

    Parameters
    ----------
    zeros_order : int
        Numerator order ``N`` (number of zeros). The feedforward part has
        ``N + 1`` coefficients.
    poles_order : int
        Denominator order ``M`` (number of poles). The feedback part has ``M``
        coefficients.
    forgetting_factor : float, optional
        Exponential forgetting factor ``lambda`` used in the recursive update of
        ``Sd``. Typical values are in ``[0.9, 1.0]``. Default is 0.99.
    delta : float, optional
        Positive regularization parameter for initializing ``Sd`` as
        :math:`S(0) = \\delta^{-1} I`. Default is 1e-3.
    w_init : array_like of float, optional
        Optional initial coefficient vector. If provided, it should have shape
        ``(M + N + 1,)`` following the parameter order described below. If None,
        the implementation initializes with zeros (and ignores ``w_init``).

    Notes
    -----
    Parameterization (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The coefficient vector is arranged as:

    - ``w[:M]``: feedback (pole) coefficients (often denoted ``a``)
    - ``w[M:]``: feedforward (zero) coefficients (often denoted ``b``)

    OE output and error (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    With ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T`` and an internal buffer of the
    last ``M`` outputs, the code forms:

    .. math::
        \\varphi(k) = [y(k-1), \\ldots, y(k-M),\\; x(k), \\ldots, x(k-N)]^T,

    computes:

    .. math::
        y(k) = w^T(k)\\, \\varphi(k), \\qquad e(k) = d(k) - y(k),

    and reports ``e(k)`` as the output-error sequence.

    Sensitivity vector and RLS recursion
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Filtered sensitivity signals stored in internal buffers (``x_line_buffer``
    and ``y_line_buffer``) are used to build:

    .. math::
        \\phi(k) =
        [\\underline{y}(k-1), \\ldots, \\underline{y}(k-M),\\;
        -\\underline{x}(k), \\ldots, -\\underline{x}(k-N)]^T.

    The inverse correlation matrix ``Sd`` is updated in an RLS-like manner:

    .. math::
        \\psi(k) = Sd(k-1)\\, \\phi(k), \\quad
        \\text{den}(k) = \\lambda + \\phi^T(k)\\, \\psi(k),

    .. math::
        Sd(k) = \\frac{1}{\\lambda}
        \\left(Sd(k-1) - \\frac{\\psi(k)\\psi^T(k)}{\\text{den}(k)}\\right).

    The coefficient update used here is:

    .. math::
        w(k+1) = w(k) - Sd(k)\\, \\phi(k)\\, e(k).

    (Note: this implementation does not expose an additional step-size parameter;
    the effective step is governed by ``Sd``.)

    Stability procedure
    ~~~~~~~~~~~~~~~~~~~
    After each update, the feedback coefficients ``w[:M]`` are stabilized by
    reflecting poles outside the unit circle back inside (pole reflection).

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 3rd ed., Algorithm 10.1 (modified).
    """

    # Real-valued algorithm only; complex inputs are rejected by the decorator.
    supports_complex: bool = False
    # Filter orders and hyper-parameters (assigned in __init__).
    zeros_order: int
    poles_order: int
    forgetting_factor: float
    delta: float
    n_coeffs: int
    # Internal state: inverse correlation matrix, past outputs, sensitivity buffers.
    Sd: np.ndarray
    y_buffer: np.ndarray
    x_line_buffer: np.ndarray
    y_line_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        forgetting_factor: float = 0.99,
        delta: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        """Set up filter orders, RLS hyper-parameters and internal state."""
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.forgetting_factor = float(forgetting_factor)
        self.delta = float(delta)

        # M feedback + (N + 1) feedforward coefficients.
        # NOTE(review): this zero-fill discards any w_init already applied by
        # the base class — matches the docstring's "(and ignores w_init)".
        self.n_coeffs = int(self.zeros_order + self.poles_order + 1)
        self.w = np.zeros(self.n_coeffs, dtype=np.float64)

        # Inverse-correlation matrix initialized as (1/delta) * I.
        self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)

        # Past outputs feeding the recursive (pole) part of the filter.
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

        # Sensitivity buffers sized for the longer of the two regressor halves.
        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
        self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
        self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Enforces IIR stability by reflecting poles outside the unit circle back inside.
        """
        # Denominator polynomial 1 - a_1 z^{-1} - ... - a_M z^{-M}.
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0
        if np.any(mask):
            # Reflect unstable poles: p -> 1 / conj(p) (preserves phase).
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            # Convert the monic polynomial back to feedback coefficients.
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the RLS-IIR (OE) adaptation loop.

        Parameters
        ----------
        input_signal : array_like of float
            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of float
            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
            Must have the same length as ``input_signal``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes sensitivity trajectories in ``result.extra``:
            - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
              scalar sensitivity signal :math:`\\underline{x}(k)`.
            - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
              scalar sensitivity signal :math:`\\underline{y}(k)`.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
              Output sequence ``y[k]`` produced by the current IIR structure.
            - errors : ndarray of float, shape ``(N,)``
              Output error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of float
              Coefficient history recorded by the base class.
            - error_type : str
              Set to ``"output_error"``.
            - extra : dict
              Empty unless ``return_internal_states=True``.
        """
        tic: float = time()

        # Work on float64 views/copies of the input sequences.
        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        # Optional per-sample traces of the scalar sensitivity signals.
        x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
        y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

        # Zero-pad the input so the first samples see an all-zero prehistory.
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # Feedforward regressor [x(k), x(k-1), ..., x(k-N)].
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            # Full OE regressor: M past outputs followed by current/past inputs.
            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

            # Recursive (IIR) output and the output error it induces.
            y_k: float = float(np.dot(self.w, regressor))
            outputs[k] = y_k
            e_k: float = float(d[k] - y_k)
            errors[k] = e_k

            # Filtered sensitivity recursions driven by the pole coefficients.
            a_coeffs: np.ndarray = self.w[: self.poles_order]
            x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

            y_line_k: float = 0.0
            if self.poles_order > 0:
                prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
                y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

            # Shift the newest sensitivity samples into the buffers (newest first).
            self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
            self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

            if return_internal_states and x_line_track is not None:
                x_line_track[k], y_line_track[k] = x_line_k, y_line_k

            # Jacobian-like direction phi(k) built from the sensitivity buffers.
            phi: np.ndarray = np.concatenate(
                (
                    self.y_line_buffer[: self.poles_order],
                    -self.x_line_buffer[: self.zeros_order + 1],
                )
            )

            # Matrix-inversion-lemma update of the inverse correlation matrix Sd.
            psi: np.ndarray = self.Sd @ phi
            den: float = float(self.forgetting_factor + phi.T @ psi)

            self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)

            # RLS coefficient step; the effective step size is governed by Sd.
            self.w -= (self.Sd @ phi) * e_k

            if self.poles_order > 0:
                # Reflect any unstable poles back inside the unit circle.
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[RLSIIR] Completed in {runtime_s * 1000:.02f} ms")

        extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
RLS-like output-error adaptation for IIR filters (real-valued).
This algorithm applies an RLS-style recursion to the IIR output-error (OE)
problem. Rather than minimizing a linear FIR error, it uses filtered
sensitivity signals to build a Jacobian-like vector \( \phi(k) \) that
approximates how the IIR output changes with respect to the pole/zero
parameters. The inverse correlation matrix (named Sd) scales the update,
typically yielding faster convergence than plain gradient methods.
The implementation corresponds to a modified form of Diniz (3rd ed.,
Alg. 10.1) and is restricted to real-valued signals (enforced by
ensure_real_signals).
Parameters
zeros_order : int
Numerator order N (number of zeros). The feedforward part has
N + 1 coefficients.
poles_order : int
Denominator order M (number of poles). The feedback part has M
coefficients.
forgetting_factor : float, optional
Exponential forgetting factor lambda used in the recursive update of
Sd. Typical values are in [0.9, 1.0]. Default is 0.99.
delta : float, optional
Positive regularization parameter for initializing Sd as
\( S(0) = \delta^{-1} I \). Default is 1e-3.
w_init : array_like of float, optional
Optional initial coefficient vector. If provided, it should have shape
(M + N + 1,) following the parameter order described below. If None,
the implementation initializes with zeros (and ignores w_init).
Notes
Parameterization (as implemented)
~~~~~~~~~
The coefficient vector is arranged as:
- `w[:M]`: feedback (pole) coefficients (often denoted `a`)
- `w[M:]`: feedforward (zero) coefficients (often denoted `b`)
OE output and error (as implemented)
~~~~~~~~
With reg_x = [x(k), x(k-1), ..., x(k-N)]^T and an internal buffer of the
last M outputs, the code forms:
$$\varphi(k) = [y(k-1), \ldots, y(k-M),\; x(k), \ldots, x(k-N)]^T,$$
computes:
$$y(k) = w^T(k)\, \varphi(k), \qquad e(k) = d(k) - y(k),$$
and reports e(k) as the output-error sequence.
Sensitivity vector and RLS recursion
~~~~~~~~
Filtered sensitivity signals stored in internal buffers (x_line_buffer
and y_line_buffer) are used to build:
$$\phi(k) = [\underline{y}(k-1), \ldots, \underline{y}(k-M),\; -\underline{x}(k), \ldots, -\underline{x}(k-N)]^T.$$
The inverse correlation matrix Sd is updated in an RLS-like manner:
$$\psi(k) = Sd(k-1)\, \phi(k), \quad \text{den}(k) = \lambda + \phi^T(k)\, \psi(k),$$
$$Sd(k) = \frac{1}{\lambda} \left(Sd(k-1) - \frac{\psi(k)\psi^T(k)}{\text{den}(k)}\right).$$
The coefficient update used here is:
$$w(k+1) = w(k) - Sd(k)\, \phi(k)\, e(k).$$
(Note: this implementation does not expose an additional step-size parameter;
the effective step is governed by Sd.)
Stability procedure
~~~~~~~
After each update, the feedback coefficients w[:M] are stabilized by
reflecting poles outside the unit circle back inside (pole reflection).
References
def __init__(
    self,
    zeros_order: int,
    poles_order: int,
    forgetting_factor: float = 0.99,
    delta: float = 1e-3,
    w_init: Optional[Union[np.ndarray, list]] = None,
) -> None:
    """
    Initializes the RLS-IIR (output-error) adaptive filter state.

    Parameters
    ----------
    zeros_order : int
        Numerator order ``N``; the feedforward part has ``N + 1`` coefficients.
    poles_order : int
        Denominator order ``M``; the feedback part has ``M`` coefficients.
    forgetting_factor : float, optional
        Exponential forgetting factor ``lambda`` for the ``Sd`` recursion.
        Default is 0.99.
    delta : float, optional
        Positive regularization used to initialize ``Sd = I / delta``.
        Default is 1e-3.
    w_init : array_like of float, optional
        Initial coefficient vector with shape ``(M + N + 1,)``, ordered as
        ``[a_1..a_M, b_0..b_N]``. If None, coefficients start at zero.

    Raises
    ------
    ValueError
        If ``w_init`` is provided but does not have ``M + N + 1`` elements.
    """
    super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

    self.zeros_order = int(zeros_order)
    self.poles_order = int(poles_order)
    self.forgetting_factor = float(forgetting_factor)
    self.delta = float(delta)

    # Total number of adaptive coefficients: M poles + (N + 1) zeros.
    self.n_coeffs = int(self.zeros_order + self.poles_order + 1)

    # Fix: honor an explicit w_init instead of silently discarding it
    # (previously the coefficients were always reset to zeros here,
    # clobbering whatever the base class stored from w_init).
    if w_init is None:
        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
    else:
        self.w = np.asarray(w_init, dtype=np.float64).ravel()
        if self.w.size != self.n_coeffs:
            raise ValueError(
                f"w_init must have {self.n_coeffs} elements, got {self.w.size}."
            )

    # Inverse correlation matrix S(0) = delta^{-1} I.
    self.Sd = (1.0 / self.delta) * np.eye(self.n_coeffs, dtype=np.float64)

    # Buffer of the last M filter outputs (feedback part of the regressor).
    self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

    # Single shared length for both sensitivity buffers; optimize() slices
    # them to [:poles_order] / [:zeros_order + 1] as needed.
    max_buffer: int = int(max(self.zeros_order + 1, self.poles_order))
    self.x_line_buffer = np.zeros(max_buffer, dtype=np.float64)
    self.y_line_buffer = np.zeros(max_buffer, dtype=np.float64)
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the RLS-IIR (OE) adaptation loop.

    Parameters
    ----------
    input_signal : array_like of float
        Real-valued input sequence ``x[k]`` with shape ``(N,)``.
    desired_signal : array_like of float
        Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
        Must have the same length as ``input_signal``.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes sensitivity trajectories in ``result.extra``:
        - ``"x_sensitivity"``: ndarray of float, shape ``(N,)`` with the
          scalar sensitivity signal :math:`\\underline{x}(k)`.
        - ``"y_sensitivity"``: ndarray of float, shape ``(N,)`` with the
          scalar sensitivity signal :math:`\\underline{y}(k)`.

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of float, shape ``(N,)``
          Output sequence ``y[k]`` produced by the current IIR structure.
        - errors : ndarray of float, shape ``(N,)``
          Output error sequence ``e[k] = d[k] - y[k]``.
        - coefficients : ndarray of float
          Coefficient history recorded by the base class.
        - error_type : str
          Set to ``"output_error"``.
        - extra : dict
          Empty unless ``return_internal_states=True``.
    """
    tic: float = time()

    x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
    d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
    n_samples: int = int(x.size)

    outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)

    # Optional per-sample sensitivity trajectories (only allocated on demand).
    x_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None
    y_line_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=np.float64) if return_internal_states else None

    # Zero-prepend N samples so the reversed slice below always yields a
    # full (N + 1)-tap input regressor, even for the first iterations.
    x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
    x_padded[self.zeros_order:] = x

    for k in range(n_samples):
        # reg_x = [x(k), x(k-1), ..., x(k-N)]^T.
        reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
        # Full regressor: last M outputs (feedback) followed by inputs.
        regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

        # IIR output y(k) = w^T phi(k) and output error e(k) = d(k) - y(k).
        y_k: float = float(np.dot(self.w, regressor))
        outputs[k] = y_k
        e_k: float = float(d[k] - y_k)
        errors[k] = e_k

        # Current pole estimates drive the sensitivity recursions.
        a_coeffs: np.ndarray = self.w[: self.poles_order]
        x_line_k: float = float(x[k] + np.dot(a_coeffs, self.x_line_buffer[: self.poles_order]))

        y_line_k: float = 0.0
        if self.poles_order > 0:
            # y-sensitivity uses the previous reported output (0 at k = 0).
            prev_y: float = float(outputs[k - 1]) if k > 0 else 0.0
            y_line_k = float(-prev_y + np.dot(a_coeffs, self.y_line_buffer[: self.poles_order]))

        # Shift the new sensitivity samples into the front of the buffers.
        self.x_line_buffer = np.concatenate(([x_line_k], self.x_line_buffer[:-1]))
        self.y_line_buffer = np.concatenate(([y_line_k], self.y_line_buffer[:-1]))

        if return_internal_states and x_line_track is not None:
            x_line_track[k], y_line_track[k] = x_line_k, y_line_k

        # Jacobian-like vector phi(k) built from the filtered sensitivities.
        phi: np.ndarray = np.concatenate(
            (
                self.y_line_buffer[: self.poles_order],
                -self.x_line_buffer[: self.zeros_order + 1],
            )
        )

        # RLS-style update of the inverse correlation matrix Sd.
        psi: np.ndarray = self.Sd @ phi
        den: float = float(self.forgetting_factor + phi.T @ psi)

        self.Sd = (1.0 / self.forgetting_factor) * (self.Sd - np.outer(psi, psi) / den)

        # Coefficient update w(k+1) = w(k) - Sd(k) phi(k) e(k); the
        # effective step size is governed entirely by Sd.
        self.w -= (self.Sd @ phi) * e_k

        if self.poles_order > 0:
            # Reflect unstable poles back inside the unit circle, then
            # shift the new output into the feedback buffer.
            self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

        self._record_history()

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[RLSIIR] Completed in {runtime_s * 1000:.02f} ms")

    extra = {"x_sensitivity": x_line_track, "y_sensitivity": y_line_track} if return_internal_states else {}

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="output_error",
        extra=extra,
    )
Executes the RLS-IIR (OE) adaptation loop.
Parameters
input_signal : array_like of float
Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
Real-valued desired/reference sequence d[k] with shape (N,).
Must have the same length as input_signal.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes sensitivity trajectories in result.extra:
- "x_sensitivity": ndarray of float, shape (N,) with the
scalar sensitivity signal \( \underline{x}(k) \).
- "y_sensitivity": ndarray of float, shape (N,) with the
scalar sensitivity signal \( \underline{y}(k) \).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Output sequence y[k] produced by the current IIR structure.
- errors : ndarray of float, shape (N,)
Output error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "output_error".
- extra : dict
Empty unless return_internal_states=True.
class SteiglitzMcBride(AdaptiveFilter):
    """
    Steiglitz–McBride (SM) adaptive algorithm for IIR filters (real-valued).

    The Steiglitz–McBride method is an iterative output-error (OE) approach
    implemented via a sequence of *prefiltered equation-error* updates. The key
    idea is to prefilter both the input ``x[k]`` and the desired signal ``d[k]``
    by the inverse of the current denominator estimate, :math:`1/A(z)`. This
    transforms the OE problem into a (locally) more linear regression and often
    improves convergence compared to directly minimizing the OE surface.

    This implementation follows the structure of Diniz (3rd ed., Alg. 10.4),
    using per-sample prefiltering recursions and a gradient-type update driven
    by the *filtered equation error*. It is restricted to **real-valued**
    signals (enforced by ``ensure_real_signals``).

    Parameters
    ----------
    zeros_order : int
        Numerator order ``N`` (number of zeros). The feedforward part has
        ``N + 1`` coefficients.
    poles_order : int
        Denominator order ``M`` (number of poles). The feedback part has ``M``
        coefficients.
    step_size : float, optional
        Adaptation step size ``mu`` for the SM update. Default is 1e-3.
    w_init : array_like of float, optional
        Optional initial coefficient vector. If provided, it should have shape
        ``(M + N + 1,)`` following the parameter order described below. If None,
        the implementation initializes with zeros (and ignores ``w_init``).

    Notes
    -----
    Parameterization (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The coefficient vector is arranged as:

    - ``w[:M]``: feedback (pole) coefficients (often denoted ``a``)
    - ``w[M:]``: feedforward (zero) coefficients (often denoted ``b``)

    "True" IIR output and output error
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    With ``reg_x = [x(k), x(k-1), ..., x(k-N)]^T`` and an internal buffer of the
    last ``M`` outputs, the method computes a "true IIR" output:

    .. math::
        y(k) = w^T(k)\\, [y(k-1),\\ldots,y(k-M),\\; x(k),\\ldots,x(k-N)]^T,

    and the reported output error:

    .. math::
        e(k) = d(k) - y(k).

    Prefiltering by 1/A(z) (as implemented)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Let ``a`` be the current feedback coefficient vector. The code implements
    the prefilter :math:`1/A(z)` through the recursions:

    .. math::
        x_f(k) = x(k) + a^T x_f(k-1:k-M), \\qquad
        d_f(k) = d(k) + a^T d_f(k-1:k-M),

    where the past filtered values are stored in ``xf_buffer`` and ``df_buffer``.

    Filtered equation error and update
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    The adaptation uses an auxiliary regressor built from the filtered signals
    (named ``regressor_s`` in the code). For ``M > 0``:

    .. math::
        \\varphi_s(k) = [d_f(k-1),\\ldots,d_f(k-M),\\; x_f(k),\\ldots,x_f(k-N)]^T,

    and for ``M = 0`` it reduces to the FIR case using only
    ``[x_f(k),\\ldots,x_f(k-N)]``.

    The filtered equation error is:

    .. math::
        e_s(k) = d_f(k) - w^T(k)\\, \\varphi_s(k),

    and the coefficient update used here is:

    .. math::
        w(k+1) = w(k) + 2\\mu\\, \\varphi_s(k)\\, e_s(k).

    Stability procedure
    ~~~~~~~~~~~~~~~~~~~
    After each update (for ``M > 0``), the feedback coefficients ``w[:M]`` are
    stabilized by reflecting poles outside the unit circle back inside (pole
    reflection). This helps keep the prefilter :math:`1/A(z)` stable.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 3rd ed., Algorithm 10.4.
    """

    # Real-valued algorithm only; complex inputs are rejected upstream.
    supports_complex: bool = False
    zeros_order: int
    poles_order: int
    step_size: float
    n_coeffs: int
    y_buffer: np.ndarray
    xf_buffer: np.ndarray
    df_buffer: np.ndarray

    def __init__(
        self,
        zeros_order: int,
        poles_order: int,
        step_size: float = 1e-3,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

        self.zeros_order = int(zeros_order)
        self.poles_order = int(poles_order)
        self.step_size = float(step_size)

        # M poles + (N + 1) zeros.
        self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)
        # NOTE(review): this zero-fill unconditionally discards w_init,
        # matching the class docstring ("ignores w_init") — confirm intended.
        self.w = np.zeros(self.n_coeffs, dtype=np.float64)

        # Last M "true IIR" outputs (feedback part of the regressor).
        self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

        # Single shared length for the 1/A(z)-filtered signal buffers;
        # they are sliced per use in optimize().
        max_buffer: int = int(max(self.zeros_order + 1, self.poles_order + 1))
        self.xf_buffer = np.zeros(max_buffer, dtype=np.float64)
        self.df_buffer = np.zeros(max_buffer, dtype=np.float64)

    def _stability_procedure(self, a_coeffs: np.ndarray) -> np.ndarray:
        """
        Reflects poles outside the unit circle back inside to ensure
        the prefilter $1/A(z)$ remains stable.
        """
        # A(z) = 1 - a_1 z^-1 - ... - a_M z^-M, so the characteristic
        # polynomial in z is [1, -a_1, ..., -a_M].
        poly_coeffs: np.ndarray = np.concatenate(([1.0], -a_coeffs))
        poles: np.ndarray = np.roots(poly_coeffs)
        mask: np.ndarray = np.abs(poles) > 1.0
        if np.any(mask):
            # Reflection p -> 1/conj(p) keeps the pole angle and inverts
            # its magnitude, moving it inside the unit circle.
            poles[mask] = 1.0 / np.conj(poles[mask])
            new_poly: np.ndarray = np.poly(poles)
            # Drop the leading 1 and negate to return to the feedback
            # convention; real() discards tiny imaginary residue.
            return -np.real(new_poly[1:])
        return a_coeffs

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Steiglitz–McBride adaptation loop.

        Parameters
        ----------
        input_signal : array_like of float
            Real-valued input sequence ``x[k]`` with shape ``(N,)``.
        desired_signal : array_like of float
            Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
            Must have the same length as ``input_signal``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes the filtered equation-error trajectory in
            ``result.extra["auxiliary_error"]`` with shape ``(N,)``.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of float, shape ``(N,)``
              "True IIR" output sequence ``y[k]``.
            - errors : ndarray of float, shape ``(N,)``
              Output error sequence ``e[k] = d[k] - y[k]``.
            - coefficients : ndarray of float
              Coefficient history recorded by the base class.
            - error_type : str
              Set to ``"a_posteriori"`` (the update is driven by the filtered
              equation error).
            - extra : dict
              Empty unless ``return_internal_states=True``.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
        d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
        n_samples: int = int(x.size)

        outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
        errors_s: np.ndarray = np.zeros(n_samples, dtype=np.float64)

        # Zero-prepend N samples so the reversed slice below always yields
        # a full (N + 1)-tap input regressor.
        x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
        x_padded[self.zeros_order:] = x

        for k in range(n_samples):
            # reg_x = [x(k), x(k-1), ..., x(k-N)]^T.
            reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
            regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

            # "True IIR" output and reported output error.
            y_k: float = float(np.dot(self.w, regressor))
            outputs[k] = y_k
            errors[k] = float(d[k] - y_k)

            a_coeffs: np.ndarray = self.w[: self.poles_order]

            # Prefilter x and d by 1/A(z) using the current pole estimates.
            xf_k: float = float(x[k] + np.dot(a_coeffs, self.xf_buffer[: self.poles_order]))
            df_k: float = float(d[k] + np.dot(a_coeffs, self.df_buffer[: self.poles_order]))

            # Shift the new filtered samples into the front of the buffers.
            self.xf_buffer = np.concatenate(([xf_k], self.xf_buffer[:-1]))
            self.df_buffer = np.concatenate(([df_k], self.df_buffer[:-1]))

            if self.poles_order == 0:
                # Pure FIR case: only filtered inputs in the regressor.
                regressor_s: np.ndarray = self.xf_buffer[: self.zeros_order + 1]
            else:
                # [d_f(k-1..k-M), x_f(k..k-N)]; the df slice starts at 1
                # to skip the just-inserted d_f(k).
                regressor_s = np.concatenate(
                    (
                        self.df_buffer[1 : self.poles_order + 1],
                        self.xf_buffer[: self.zeros_order + 1],
                    )
                )

            # Filtered equation error drives the gradient-type update.
            e_s_k: float = float(df_k - np.dot(self.w, regressor_s))
            errors_s[k] = e_s_k

            self.w += 2.0 * self.step_size * regressor_s * e_s_k

            if self.poles_order > 0:
                # Keep 1/A(z) stable, then refresh the output buffer.
                self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
                self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[SteiglitzMcBride] Completed in {runtime_s * 1000:.02f} ms")

        extra = {"auxiliary_error": errors_s} if return_internal_states else {}

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_posteriori",
            extra=extra,
        )
Steiglitz–McBride (SM) adaptive algorithm for IIR filters (real-valued).
The Steiglitz–McBride method is an iterative output-error (OE) approach
implemented via a sequence of prefiltered equation-error updates. The key
idea is to prefilter both the input x[k] and the desired signal d[k]
by the inverse of the current denominator estimate, \( 1/A(z) \). This
transforms the OE problem into a (locally) more linear regression and often
improves convergence compared to directly minimizing the OE surface.
This implementation follows the structure of Diniz (3rd ed., Alg. 10.4),
using per-sample prefiltering recursions and a gradient-type update driven
by the filtered equation error. It is restricted to real-valued
signals (enforced by ensure_real_signals).
Parameters
zeros_order : int
Numerator order N (number of zeros). The feedforward part has
N + 1 coefficients.
poles_order : int
Denominator order M (number of poles). The feedback part has M
coefficients.
step_size : float, optional
Adaptation step size mu for the SM update. Default is 1e-3.
w_init : array_like of float, optional
Optional initial coefficient vector. If provided, it should have shape
(M + N + 1,) following the parameter order described below. If None,
the implementation initializes with zeros (and ignores w_init).
Notes
Parameterization (as implemented)
~~~~~~~~~
The coefficient vector is arranged as:
- w[:M]: feedback (pole) coefficients (often denoted a)
- w[M:]: feedforward (zero) coefficients (often denoted b)
"True" IIR output and output error
~~~~~~~~~~
With reg_x = [x(k), x(k-1), ..., x(k-N)]^T and an internal buffer of the
last M outputs, the method computes a "true IIR" output:
$$y(k) = w^T(k)\, [y(k-1),\ldots,y(k-M),\; x(k),\ldots,x(k-N)]^T,$$
and the reported output error:
$$e(k) = d(k) - y(k).$$
Prefiltering by 1/A(z) (as implemented)
~~~~~~~~~~~
Let a be the current feedback coefficient vector. The code implements
the prefilter \( 1/A(z) \) through the recursions:
$$x_f(k) = x(k) + a^T x_f(k-1:k-M), \qquad d_f(k) = d(k) + a^T d_f(k-1:k-M),$$
where the past filtered values are stored in xf_buffer and df_buffer.
Filtered equation error and update
~~~~~~~~~~
The adaptation uses an auxiliary regressor built from the filtered signals
(named regressor_s in the code). For M > 0:
$$\varphi_s(k) = [d_f(k-1),\ldots,d_f(k-M),\; x_f(k),\ldots,x_f(k-N)]^T,$$
and for M = 0 it reduces to the FIR case using only
\( [x_f(k), \ldots, x_f(k-N)] \).
The filtered equation error is:
$$e_s(k) = d_f(k) - w^T(k)\, \varphi_s(k),$$
and the coefficient update used here is:
$$w(k+1) = w(k) + 2\mu\, \varphi_s(k)\, e_s(k).$$
Stability procedure
~~~~~~~
After each update (for M > 0), the feedback coefficients w[:M] are
stabilized by reflecting poles outside the unit circle back inside (pole
reflection). This helps keep the prefilter \( 1/A(z) \) stable.
References
def __init__(
    self,
    zeros_order: int,
    poles_order: int,
    step_size: float = 1e-3,
    w_init: Optional[Union[np.ndarray, list]] = None,
) -> None:
    """
    Initializes the Steiglitz–McBride adaptive filter state.

    Parameters
    ----------
    zeros_order : int
        Numerator order ``N``; the feedforward part has ``N + 1`` coefficients.
    poles_order : int
        Denominator order ``M``; the feedback part has ``M`` coefficients.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 1e-3.
    w_init : array_like of float, optional
        Initial coefficient vector with shape ``(M + N + 1,)``, ordered as
        ``[a_1..a_M, b_0..b_N]``. If None, coefficients start at zero.

    Raises
    ------
    ValueError
        If ``w_init`` is provided but does not have ``M + N + 1`` elements.
    """
    super().__init__(filter_order=zeros_order + poles_order, w_init=w_init)

    self.zeros_order = int(zeros_order)
    self.poles_order = int(poles_order)
    self.step_size = float(step_size)

    # M poles + (N + 1) zeros.
    self.n_coeffs = int(self.zeros_order + 1 + self.poles_order)

    # Fix: honor an explicit w_init instead of silently discarding it
    # (previously the coefficients were always reset to zeros here).
    if w_init is None:
        self.w = np.zeros(self.n_coeffs, dtype=np.float64)
    else:
        self.w = np.asarray(w_init, dtype=np.float64).ravel()
        if self.w.size != self.n_coeffs:
            raise ValueError(
                f"w_init must have {self.n_coeffs} elements, got {self.w.size}."
            )

    # Last M "true IIR" outputs (feedback part of the regressor).
    self.y_buffer = np.zeros(self.poles_order, dtype=np.float64)

    # Shared length for the 1/A(z)-filtered signal buffers.
    max_buffer: int = int(max(self.zeros_order + 1, self.poles_order + 1))
    self.xf_buffer = np.zeros(max_buffer, dtype=np.float64)
    self.df_buffer = np.zeros(max_buffer, dtype=np.float64)
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Executes the Steiglitz–McBride adaptation loop.

    Parameters
    ----------
    input_signal : array_like of float
        Real-valued input sequence ``x[k]`` with shape ``(N,)``.
    desired_signal : array_like of float
        Real-valued desired/reference sequence ``d[k]`` with shape ``(N,)``.
        Must have the same length as ``input_signal``.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes the filtered equation-error trajectory in
        ``result.extra["auxiliary_error"]`` with shape ``(N,)``.

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of float, shape ``(N,)``
          "True IIR" output sequence ``y[k]``.
        - errors : ndarray of float, shape ``(N,)``
          Output error sequence ``e[k] = d[k] - y[k]``.
        - coefficients : ndarray of float
          Coefficient history recorded by the base class.
        - error_type : str
          Set to ``"a_posteriori"`` (the update is driven by the filtered
          equation error).
        - extra : dict
          Empty unless ``return_internal_states=True``.
    """
    tic: float = time()

    x: np.ndarray = np.asarray(input_signal, dtype=np.float64)
    d: np.ndarray = np.asarray(desired_signal, dtype=np.float64)
    n_samples: int = int(x.size)

    outputs: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors: np.ndarray = np.zeros(n_samples, dtype=np.float64)
    errors_s: np.ndarray = np.zeros(n_samples, dtype=np.float64)

    # Zero-prepend N samples so the reversed slice below always yields a
    # full (N + 1)-tap input regressor.
    x_padded: np.ndarray = np.zeros(n_samples + self.zeros_order, dtype=np.float64)
    x_padded[self.zeros_order:] = x

    for k in range(n_samples):
        # reg_x = [x(k), x(k-1), ..., x(k-N)]^T.
        reg_x: np.ndarray = x_padded[k : k + self.zeros_order + 1][::-1]
        regressor: np.ndarray = np.concatenate((self.y_buffer, reg_x))

        # "True IIR" output and reported output error.
        y_k: float = float(np.dot(self.w, regressor))
        outputs[k] = y_k
        errors[k] = float(d[k] - y_k)

        a_coeffs: np.ndarray = self.w[: self.poles_order]

        # Prefilter x and d by 1/A(z) using the current pole estimates.
        xf_k: float = float(x[k] + np.dot(a_coeffs, self.xf_buffer[: self.poles_order]))
        df_k: float = float(d[k] + np.dot(a_coeffs, self.df_buffer[: self.poles_order]))

        # Shift the new filtered samples into the front of the buffers.
        self.xf_buffer = np.concatenate(([xf_k], self.xf_buffer[:-1]))
        self.df_buffer = np.concatenate(([df_k], self.df_buffer[:-1]))

        if self.poles_order == 0:
            # Pure FIR case: only filtered inputs in the regressor.
            regressor_s: np.ndarray = self.xf_buffer[: self.zeros_order + 1]
        else:
            # [d_f(k-1..k-M), x_f(k..k-N)]; the df slice starts at 1 to
            # skip the just-inserted d_f(k).
            regressor_s = np.concatenate(
                (
                    self.df_buffer[1 : self.poles_order + 1],
                    self.xf_buffer[: self.zeros_order + 1],
                )
            )

        # Filtered equation error drives the gradient-type update.
        e_s_k: float = float(df_k - np.dot(self.w, regressor_s))
        errors_s[k] = e_s_k

        self.w += 2.0 * self.step_size * regressor_s * e_s_k

        if self.poles_order > 0:
            # Keep 1/A(z) stable, then refresh the output buffer.
            self.w[: self.poles_order] = self._stability_procedure(self.w[: self.poles_order])
            self.y_buffer = np.concatenate(([y_k], self.y_buffer[:-1]))

        self._record_history()

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[SteiglitzMcBride] Completed in {runtime_s * 1000:.02f} ms")

    extra = {"auxiliary_error": errors_s} if return_internal_states else {}

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_posteriori",
        extra=extra,
    )
Executes the Steiglitz–McBride adaptation loop.
Parameters
input_signal : array_like of float
Real-valued input sequence x[k] with shape (N,).
desired_signal : array_like of float
Real-valued desired/reference sequence d[k] with shape (N,).
Must have the same length as input_signal.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the filtered equation-error trajectory in
result.extra["auxiliary_error"] with shape (N,).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
"True IIR" output sequence y[k].
- errors : ndarray of float, shape (N,)
Output error sequence e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "a_posteriori" (the update is driven by the filtered
equation error).
- extra : dict
Empty unless return_internal_states=True.
class BilinearRLS(AdaptiveFilter):
    """
    Bilinear RLS adaptive filter for real-valued signals.

    Runs the standard a priori RLS recursion of Diniz (Alg. 11.3) on a fixed
    four-dimensional *bilinear* regressor that couples the present input with
    the previous input and desired samples:

    .. math::
        u[k] = [x[k],\\; d[k-1],\\; x[k]d[k-1],\\; x[k-1]d[k-1]]^T
        \\in \\mathbb{R}^{4},

    with ``x[-1] = d[-1] = 0`` at start-up.

    Parameters
    ----------
    forgetting_factor : float, optional
        Forgetting factor ``lambda``; must satisfy ``0 < lambda <= 1``.
        Default is 0.98.
    delta : float, optional
        Positive regularization; the inverse correlation matrix starts as
        ``P(0) = I/delta``. Default is 1.0.
    w_init : array_like of float, optional
        Initial coefficient vector ``w(0)`` of shape ``(4,)``; zeros if None.
    safe_eps : float, optional
        Keyword-only guard applied to near-zero denominators. Default 1e-12.

    Notes
    -----
    Real-valued only (``supports_complex=False``); ``@ensure_real_signals``
    on :meth:`optimize` enforces this. Each step computes the a priori output
    ``y[k] = w^T[k-1] u[k]`` and error ``e[k] = d[k] - y[k]``, the gain
    ``g[k] = P u / (lambda + u^T P u)``, then updates
    ``P <- (P - g u^T P) / lambda`` and ``w <- w + g e``. The denominator is
    clamped to ``safe_eps`` in magnitude, and coefficient history is recorded
    via the base class.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 11.3.
    """

    supports_complex: bool = False

    def __init__(
        self,
        forgetting_factor: float = 0.98,
        delta: float = 1.0,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        # The bilinear regressor dimension is fixed at four.
        n_coeffs = 4
        super().__init__(filter_order=n_coeffs - 1, w_init=w_init)

        self.lambda_factor = float(forgetting_factor)
        if not (0.0 < self.lambda_factor <= 1.0):
            raise ValueError(
                f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got {self.lambda_factor}."
            )

        self.delta = float(delta)
        if self.delta <= 0.0:
            raise ValueError(f"delta must be > 0. Got delta={self.delta}.")

        # Denominator guard used inside the adaptation loop.
        self._safe_eps = float(safe_eps)

        # P(0) = I / delta.
        self.P = np.eye(n_coeffs, dtype=np.float64) / self.delta

    @validate_input
    @ensure_real_signals
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Runs the bilinear RLS adaptation over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of float
            Input sequence ``x[k]`` with shape ``(N,)`` (flattened).
        desired_signal : array_like of float
            Desired sequence ``d[k]`` with shape ``(N,)`` (flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, ``result.extra`` carries ``"P_last"``,
            ``"last_regressor"`` (``u[k]``) and ``"last_gain"`` (``g[k]``).

        Returns
        -------
        OptimizationResult
            Result with the a priori outputs ``y[k] = w^T[k-1] u[k]``, the
            a priori errors ``e[k] = d[k] - y[k]``, the coefficient history
            recorded by the base class, ``error_type="a_priori"``, and the
            optional ``extra`` dict described above.
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()
        n_samples = int(x.size)

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        # One-sample delayed state; x[-1] = d[-1] = 0 by convention.
        prev_x, prev_d = 0.0, 0.0

        final_u: Optional[np.ndarray] = None
        final_gain: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Bilinear regressor u[k].
            u = np.array(
                [x[k], prev_d, x[k] * prev_d, prev_x * prev_d],
                dtype=np.float64,
            )
            final_u = u

            # A priori output and error.
            y_now = float(np.dot(self.w, u))
            outputs[k] = y_now
            err_now = float(d[k] - y_now)
            errors[k] = err_now

            # Gain computation with a guarded denominator.
            p_times_u = self.P @ u
            denom = float(self.lambda_factor + (u @ p_times_u))
            if abs(denom) < self._safe_eps:
                if denom == 0.0:
                    denom = float(self._safe_eps)
                else:
                    # Clamp magnitude while preserving sign.
                    denom = float(np.sign(denom) * self._safe_eps)

            gain = p_times_u / denom
            final_gain = gain

            # Inverse correlation and coefficient updates.
            self.P = (self.P - np.outer(gain, p_times_u)) / self.lambda_factor
            self.w = self.w + gain * err_now
            self._record_history()

            prev_x = float(x[k])
            prev_d = float(d[k])

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[BilinearRLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "P_last": self.P.copy(),
                "last_regressor": None if final_u is None else final_u.copy(),
                "last_gain": None if final_gain is None else final_gain.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Bilinear RLS adaptive filter (real-valued).
RLS algorithm with a fixed 4-dimensional bilinear regressor structure, following Diniz (Alg. 11.3). The regressor couples the current input with past desired samples to model a simple bilinear relationship.
Parameters
forgetting_factor : float, optional
Forgetting factor lambda with 0 < lambda <= 1. Default is 0.98.
delta : float, optional
Regularization parameter used to initialize the inverse correlation
matrix as P(0) = I/delta (requires delta > 0). Default is 1.0.
w_init : array_like of float, optional
Initial coefficient vector w(0) with shape (4,). If None,
initializes with zeros.
safe_eps : float, optional
Small positive constant used to guard denominators. Default is 1e-12.
Notes
Real-valued only
This implementation is restricted to real-valued signals and coefficients
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
Bilinear regressor (as implemented). This implementation uses a 4-component regressor:
$$u[k] =
\begin{bmatrix} x[k] \\ d[k-1] \\ x[k]d[k-1] \\ x[k-1]d[k-1] \end{bmatrix} \in \mathbb{R}^{4}.$$
The delayed samples ``x[k-1]`` and ``d[k-1]`` are taken from the previous iteration,
with ``x[-1] = 0`` and ``d[-1] = 0`` at initialization.
RLS recursion (a priori form) With
$$y[k] = w^T[k-1] u[k], \qquad e[k] = d[k] - y[k],$$
the gain vector is
$$g[k] = \frac{P[k-1] u[k]}{\lambda + u^T[k] P[k-1] u[k]},$$
the inverse correlation update is
$$P[k] = \frac{1}{\lambda}\left(P[k-1] - g[k] u^T[k] P[k-1]\right),$$
and the coefficient update is
$$w[k] = w[k-1] + g[k] e[k].$$
Implementation details
- The denominator lambda + u^T P u is guarded by safe_eps to avoid
numerical issues when very small.
- Coefficient history is recorded via the base class.
References
def __init__(
    self,
    forgetting_factor: float = 0.98,
    delta: float = 1.0,
    w_init: Optional[ArrayLike] = None,
    *,
    safe_eps: float = 1e-12,
) -> None:
    """
    Set up the fixed 4-tap bilinear RLS state.

    Parameters
    ----------
    forgetting_factor : float, optional
        Forgetting factor lambda, required to lie in (0, 1]. Default is 0.98.
    delta : float, optional
        Positive regularizer; the inverse correlation matrix starts as
        P(0) = I / delta. Default is 1.0.
    w_init : array_like of float, optional
        Initial coefficient vector of length 4; zeros when None.
    safe_eps : float, optional
        Denominator guard used during adaptation. Default is 1e-12.

    Raises
    ------
    ValueError
        If ``forgetting_factor`` is outside (0, 1] or ``delta`` is not positive.
    """
    # Regressor is always [x[k], d[k-1], x[k]d[k-1], x[k-1]d[k-1]] -> 4 taps.
    num_taps = 4
    super().__init__(filter_order=num_taps - 1, w_init=w_init)

    self.lambda_factor = float(forgetting_factor)
    if not 0.0 < self.lambda_factor <= 1.0:
        raise ValueError(
            f"forgetting_factor must satisfy 0 < forgetting_factor <= 1. Got {self.lambda_factor}."
        )

    self.delta = float(delta)
    if self.delta <= 0.0:
        raise ValueError(f"delta must be > 0. Got delta={self.delta}.")

    self._safe_eps = float(safe_eps)

    # Inverse autocorrelation estimate, initialized as a scaled identity.
    self.P = np.eye(num_taps, dtype=np.float64) / self.delta
@validate_input
@ensure_real_signals
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run the bilinear RLS adaptation loop over paired input/desired sequences.

    Parameters
    ----------
    input_signal : array_like of float
        Input sequence ``x[k]``, flattened to shape ``(N,)``.
    desired_signal : array_like of float
        Desired sequence ``d[k]``, flattened to shape ``(N,)``.
    verbose : bool, optional
        When True, prints the total runtime after the loop finishes.
    return_internal_states : bool, optional
        When True, ``result.extra`` carries ``"P_last"``, ``"last_regressor"``
        (``u[k]``), and ``"last_gain"`` (``g[k]``).

    Returns
    -------
    OptimizationResult
        Contains the a priori outputs ``y[k] = w^T[k-1] u[k]``, the a priori
        errors ``e[k] = d[k] - y[k]``, the coefficient history recorded by
        the base class, ``error_type="a_priori"``, and (optionally) ``extra``.
    """
    start = perf_counter()

    x_seq = np.asarray(input_signal, dtype=np.float64).ravel()
    d_seq = np.asarray(desired_signal, dtype=np.float64).ravel()

    num_samples = int(x_seq.size)
    outputs = np.zeros(num_samples, dtype=np.float64)
    errors = np.zeros(num_samples, dtype=np.float64)

    # Delayed samples feeding the bilinear regressor; zero before k = 0.
    x_delayed = 0.0
    d_delayed = 0.0

    final_regressor: Optional[np.ndarray] = None
    final_gain: Optional[np.ndarray] = None

    for k in range(num_samples):
        # u[k] = [x[k], d[k-1], x[k] d[k-1], x[k-1] d[k-1]]
        regressor = np.array(
            [x_seq[k], d_delayed, x_seq[k] * d_delayed, x_delayed * d_delayed],
            dtype=np.float64,
        )
        final_regressor = regressor

        prediction = float(np.dot(self.w, regressor))
        outputs[k] = prediction
        residual = float(d_seq[k] - prediction)
        errors[k] = residual

        # Gain g = P u / (lambda + u^T P u); denominator guarded by safe_eps.
        Pu = self.P @ regressor
        den = float(self.lambda_factor + (regressor @ Pu))
        if abs(den) < self._safe_eps:
            if den == 0.0:
                den = float(self._safe_eps)
            else:
                den = float(np.sign(den) * self._safe_eps)
        gain = Pu / den
        final_gain = gain

        # Riccati-style inverse-correlation update, then the coefficient step.
        self.P = (self.P - np.outer(gain, Pu)) / self.lambda_factor
        self.w = self.w + gain * residual
        self._record_history()

        x_delayed = float(x_seq[k])
        d_delayed = float(d_seq[k])

    runtime_s = float(perf_counter() - start)
    if verbose:
        print(f"[BilinearRLS] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "P_last": self.P.copy(),
            "last_regressor": None if final_regressor is None else final_regressor.copy(),
            "last_gain": None if final_gain is None else final_gain.copy(),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the bilinear RLS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of float
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal states in result.extra:
"P_last", "last_regressor" (u[k]), and "last_gain" (g[k]).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar a priori output sequence, y[k] = w^T[k-1] u[k].
- errors : ndarray of float, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of float
Coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True.
class ComplexRBF(AdaptiveFilter):
    """
    Complex-valued radial basis function (CRBF) adaptive network.

    Online Gaussian-RBF model after Diniz, Alg. 11.6: the scalar output is a
    weighted combination (complex weights) of real-valued Gaussian
    activations centered at complex vectors. Weights, centers, and spreads
    are all adapted from the a priori error.

    Parameters
    ----------
    n_neurons : int
        Number of RBF units (centers / basis functions).
    input_dim : int
        Length of the regressor vector ``u[k]``.
    ur : float, optional
        Step size for the centers update. Default is 0.01.
    uw : float, optional
        Step size for the weights update. Default is 0.01.
    us : float, optional
        Step size for the spreads (sigma) update. Default is 0.01.
    w_init : array_like of complex, optional
        Initial weights ``w(0)`` of shape ``(n_neurons,)``; random complex
        Gaussian values are drawn when None.
    sigma_init : float, optional
        Common initial spread (> 0). Default is 1.0.
    rng : numpy.random.Generator, optional
        Generator used for weight/center initialization; a fresh default
        generator is created when None.

    Notes
    -----
    ``optimize`` accepts either a 1D signal (tapped-delay regressors of
    length ``input_dim`` are built internally) or an ``(N, input_dim)``
    regressor matrix whose rows are used directly. The per-neuron activation
    is ``exp(-||u - c_p||^2 / sigma_p^2)``, with the squared norm taken as
    the sum of squared real and imaginary parts, and the output is
    ``np.vdot(w, f)`` (which conjugates the weights). Denominators involving
    ``sigma`` are guarded by ``safe_eps`` and ``sigma`` is clipped from
    below after each update. The base-class coefficient history tracks the
    neuron weights ``w``; centers and spreads are exposed via
    ``result.extra`` on request.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 11.6.
    """

    supports_complex: bool = True

    def __init__(
        self,
        n_neurons: int,
        input_dim: int,
        ur: float = 0.01,
        uw: float = 0.01,
        us: float = 0.01,
        w_init: Optional[ArrayLike] = None,
        *,
        sigma_init: float = 1.0,
        rng: Optional[np.random.Generator] = None,
    ) -> None:
        n_neurons = int(n_neurons)
        input_dim = int(input_dim)

        # Validate sizes before touching any state.
        if n_neurons <= 0:
            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
        if input_dim <= 0:
            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
        if sigma_init <= 0.0:
            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")

        super().__init__(filter_order=n_neurons - 1, w_init=None)

        self.n_neurons = n_neurons
        self.input_dim = input_dim
        self.ur = float(ur)
        self.uw = float(uw)
        self.us = float(us)

        self._rng = np.random.default_rng() if rng is None else rng

        if w_init is None:
            # Draw real part first, then imaginary (order fixed for reproducibility).
            real_part = self._rng.standard_normal(n_neurons)
            imag_part = self._rng.standard_normal(n_neurons)
            self.w = (real_part + 1j * imag_part).astype(complex)
        else:
            weights = np.asarray(w_init, dtype=complex).reshape(-1)
            if weights.size != n_neurons:
                raise ValueError(f"w_init must have length {n_neurons}, got {weights.size}.")
            self.w = weights

        # Complex Gaussian centers, one row per neuron, scaled by 0.5.
        self.vet = 0.5 * (
            self._rng.standard_normal((n_neurons, input_dim))
            + 1j * self._rng.standard_normal((n_neurons, input_dim))
        ).astype(complex)

        # All spreads start at sigma_init.
        self.sigma = np.full(n_neurons, float(sigma_init), dtype=float)

        self._record_history()

    @staticmethod
    def _build_regressors_from_signal(x: np.ndarray, input_dim: int) -> np.ndarray:
        """Build an (N, input_dim) tapped-delay regressor matrix from a 1D signal."""
        sig = np.asarray(x, dtype=complex).ravel()
        n = int(sig.size)
        dim = int(input_dim)
        # Column j holds x[k - j], with zeros filling the pre-signal samples.
        columns = [
            np.concatenate([np.zeros(min(j, n), dtype=complex), sig[: max(n - j, 0)]])
            for j in range(dim)
        ]
        return np.stack(columns, axis=1)

    @staticmethod
    def _squared_distance_complex(u: np.ndarray, centers: np.ndarray) -> np.ndarray:
        """
        Return ||u - c_p||^2 for every center row.

        The squared norm is the sum of squared real and imaginary parts, so
        the result is real with shape (n_neurons,).
        """
        delta = u[None, :] - centers
        return (delta.real * delta.real + delta.imag * delta.imag).sum(axis=1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
        *,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Run the CRBF adaptation loop over paired regressor/desired sequences.

        Parameters
        ----------
        input_signal : array_like of complex
            Either a 1D signal ``x[k]`` (tapped-delay regressors of length
            ``input_dim`` are built internally) or an ``(N, input_dim)``
            regressor matrix whose rows are used as ``u[k]``.
        desired_signal : array_like of complex
            Desired sequence ``d[k]``, flattened to shape ``(N,)``.
        verbose : bool, optional
            When True, prints the total runtime after completion.
        return_internal_states : bool, optional
            When True, ``result.extra`` carries ``"centers_last"``,
            ``"sigma_last"``, ``"last_activation"``, ``"last_regressor"``,
            ``"input_dim"``, and ``"n_neurons"``.
        safe_eps : float, optional
            Guard for denominators involving ``sigma``. Default is 1e-12.

        Returns
        -------
        OptimizationResult
            A priori outputs ``y[k] = w^H[k-1] f(u[k])``, errors
            ``e[k] = d[k] - y[k]``, the weight history recorded by the base
            class, and ``error_type="a_priori"``.
        """
        start = perf_counter()

        raw = np.asarray(input_signal)
        d = np.asarray(desired_signal, dtype=complex).ravel()

        if raw.ndim == 1:
            U = self._build_regressors_from_signal(raw, self.input_dim)
        elif raw.ndim == 2:
            U = np.asarray(raw, dtype=complex)
            if U.shape[1] != self.input_dim:
                raise ValueError(
                    f"input_signal has shape {U.shape}, expected second dim input_dim={self.input_dim}."
                )
        else:
            raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")

        N = int(U.shape[0])
        if d.size != N:
            raise ValueError(f"Inconsistent lengths: regressors({N}) != desired({d.size}).")

        outputs = np.zeros(N, dtype=complex)
        errors = np.zeros(N, dtype=complex)

        final_f: Optional[np.ndarray] = None
        final_u: Optional[np.ndarray] = None

        guard = float(safe_eps)

        for k in range(N):
            u = U[k, :]
            final_u = u

            # Gaussian activations; sigma^2 is floored to avoid blow-up.
            dist_sq = self._squared_distance_complex(u, self.vet)
            f = np.exp(-dist_sq / np.maximum(self.sigma**2, guard))
            final_f = f

            w_old = self.w
            y_k = complex(np.vdot(w_old, f))  # np.vdot conjugates w_old
            outputs[k] = y_k
            e_k = d[k] - y_k
            errors[k] = e_k

            # Weight step (Alg. 11.6).
            self.w = self.w + (2.0 * self.uw) * f * np.conj(e_k)

            phi = np.real(e_k * w_old)

            # Spread step with sigma^3 guard, then clip sigma from below.
            self.sigma = np.maximum(
                self.sigma + (4.0 * self.us) * f * phi * dist_sq / np.maximum(self.sigma**3, guard),
                guard,
            )

            # Center step uses the freshly updated sigma.
            denom_c = np.maximum(self.sigma**2, guard)
            self.vet = self.vet + (2.0 * self.ur) * (f[:, None] * phi[:, None]) * (u - self.vet) / denom_c[:, None]

            self._record_history()

        runtime_s = float(perf_counter() - start)
        if verbose:
            print(f"[ComplexRBF] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "centers_last": self.vet.copy(),
                "sigma_last": self.sigma.copy(),
                "last_activation": None if final_f is None else np.asarray(final_f).copy(),
                "last_regressor": None if final_u is None else np.asarray(final_u).copy(),
                "input_dim": int(self.input_dim),
                "n_neurons": int(self.n_neurons),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Complex Radial Basis Function (CRBF) adaptive network (complex-valued).
Complex-valued RBF adaptive model following Diniz (Alg. 11.6). The network output is formed from Gaussian radial basis functions centered at complex vectors, combined by complex weights.
Parameters
n_neurons : int
Number of RBF neurons (centers/basis functions).
input_dim : int
Regressor dimension (length of \( u[k] \)).
ur : float, optional
Step-size for centers update. Default is 0.01.
uw : float, optional
Step-size for weights update. Default is 0.01.
us : float, optional
Step-size for spread (sigma) update. Default is 0.01.
w_init : array_like of complex, optional
Initial neuron weights \( w(0) \) with shape (n_neurons,). If None,
weights are initialized randomly (complex Gaussian).
sigma_init : float, optional
Initial spread used for all neurons (must be > 0). Default is 1.0.
rng : numpy.random.Generator, optional
Random generator used for reproducible initialization when w_init is None
(and for centers initialization). If None, uses np.random.default_rng().
Notes
Complex-valued
This implementation supports complex-valued signals and coefficients
(supports_complex=True).
Input handling
optimize() accepts either:
1) A 1D input signal x[k] with shape (N,). A tapped-delay regressor
matrix U with shape (N, input_dim) is built internally using
$$u[k] = [x[k], x[k-1], \dots, x[k-\mathrm{input\_dim}+1]]^T.$$
2) A 2D regressor matrix ``U`` with shape ``(N, input_dim)`` whose rows are
used directly as \( u[k] \).
RBF activations and output (as implemented)
For neuron \( p \) with complex center \( c_p \in \mathbb{C}^{D} \)
(stored as row vet[p, :]) and real spread \( \sigma_p > 0 \)
(stored in sigma[p]), the activation is
$$f_p(u[k]) = \exp\left( -\frac{\lVert u[k] - c_p \rVert^2}{\sigma_p^2} \right),$$
where \( \lVert \cdot \rVert^2 \) is implemented as the sum of squared
real and imaginary parts. Stacking all activations:
$$f(u[k]) = [f_1(u[k]), \dots, f_P(u[k])]^{T} \in \mathbb{R}^{P},$$
the (a priori) output is computed as
$$y[k] = w^H[k-1] f(u[k]) = \sum_{p=1}^{P} \overline{w_p[k-1]}\, f_p(u[k]).$$
In code, this corresponds to ``np.vdot(w_old, f)``.
Adaptation loop (a priori form, as implemented). With error
$$e[k] = d[k] - y[k],$$
the weight update is
$$w[k] = w[k-1] + 2\,\mu_w\, \overline{e[k]}\, f(u[k]),$$
where ``mu_w = uw``. The center and spread updates follow the expressions
implemented in the code via the intermediate term ``phi = real(e[k] * w_old)``.
(The exact algebraic form is determined by Alg. 11.6 and the original implementation.)
Numerical safeguards
- safe_eps in optimize() guards denominators involving sigma
to avoid division by very small values.
- sigma is clipped from below by safe_eps after each update.
Implementation details
- Coefficient history recorded by the base class corresponds to the neuron
weights w. Centers (vet) and spreads (sigma) are not part of the
base history but can be returned via result.extra when requested.
References
def __init__(
    self,
    n_neurons: int,
    input_dim: int,
    ur: float = 0.01,
    uw: float = 0.01,
    us: float = 0.01,
    w_init: Optional[ArrayLike] = None,
    *,
    sigma_init: float = 1.0,
    rng: Optional[np.random.Generator] = None,
) -> None:
    """
    Initialize the CRBF network state (weights, centers, spreads).

    Raises
    ------
    ValueError
        If ``n_neurons`` or ``input_dim`` is not positive, ``sigma_init`` is
        not positive, or ``w_init`` has the wrong length.
    """
    n_neurons = int(n_neurons)
    input_dim = int(input_dim)

    # Validate sizes before touching base-class state.
    if n_neurons <= 0:
        raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
    if input_dim <= 0:
        raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
    if sigma_init <= 0.0:
        raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")

    super().__init__(filter_order=n_neurons - 1, w_init=None)

    self.n_neurons = n_neurons
    self.input_dim = input_dim
    self.ur = float(ur)
    self.uw = float(uw)
    self.us = float(us)

    self._rng = np.random.default_rng() if rng is None else rng

    if w_init is None:
        # Draw real part first, then imaginary (order fixed for reproducibility).
        re = self._rng.standard_normal(n_neurons)
        im = self._rng.standard_normal(n_neurons)
        self.w = (re + 1j * im).astype(complex)
    else:
        initial = np.asarray(w_init, dtype=complex).reshape(-1)
        if initial.size != n_neurons:
            raise ValueError(f"w_init must have length {n_neurons}, got {initial.size}.")
        self.w = initial

    # Complex Gaussian centers, one row per neuron, scaled by 0.5.
    self.vet = 0.5 * (
        self._rng.standard_normal((n_neurons, input_dim))
        + 1j * self._rng.standard_normal((n_neurons, input_dim))
    ).astype(complex)

    # All spreads start at sigma_init.
    self.sigma = np.full(n_neurons, float(sigma_init), dtype=float)

    self._record_history()
def optimize(
    self,
    input_signal: Union[np.ndarray, list],
    desired_signal: Union[np.ndarray, list],
    verbose: bool = False,
    return_internal_states: bool = False,
    *,
    safe_eps: float = 1e-12,
) -> OptimizationResult:
    """
    Run the CRBF adaptation loop (Diniz, Alg. 11.6) over the given data.

    Parameters
    ----------
    input_signal : array_like of complex
        Either a 1D signal ``x[k]`` (tapped-delay regressors of length
        ``input_dim`` are built internally) or an ``(N, input_dim)``
        regressor matrix whose rows are used as ``u[k]``.
    desired_signal : array_like of complex
        Desired sequence ``d[k]``, flattened to shape ``(N,)``.
    verbose : bool, optional
        When True, prints the total runtime after completion.
    return_internal_states : bool, optional
        When True, ``result.extra`` carries the final centers, spreads,
        activation, and regressor, plus the network dimensions.
    safe_eps : float, optional
        Guard for denominators involving ``sigma``. Default is 1e-12.

    Returns
    -------
    OptimizationResult
        A priori outputs ``y[k] = w^H[k-1] f(u[k])``, errors
        ``e[k] = d[k] - y[k]``, the weight history recorded by the base
        class, and ``error_type="a_priori"``.
    """
    tic = perf_counter()

    raw = np.asarray(input_signal)
    desired = np.asarray(desired_signal, dtype=complex).ravel()

    if raw.ndim == 1:
        regressors = self._build_regressors_from_signal(raw, self.input_dim)
    elif raw.ndim == 2:
        regressors = np.asarray(raw, dtype=complex)
        if regressors.shape[1] != self.input_dim:
            raise ValueError(
                f"input_signal has shape {regressors.shape}, expected second dim input_dim={self.input_dim}."
            )
    else:
        raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")

    n_iters = int(regressors.shape[0])
    if desired.size != n_iters:
        raise ValueError(f"Inconsistent lengths: regressors({n_iters}) != desired({desired.size}).")

    outputs = np.zeros(n_iters, dtype=complex)
    errors = np.zeros(n_iters, dtype=complex)

    newest_f: Optional[np.ndarray] = None
    newest_u: Optional[np.ndarray] = None

    floor = float(safe_eps)

    for k in range(n_iters):
        u = regressors[k, :]
        newest_u = u

        # Gaussian activations; sigma^2 is floored to avoid division blow-up.
        dist_sq = self._squared_distance_complex(u, self.vet)
        f = np.exp(-dist_sq / np.maximum(self.sigma**2, floor))
        newest_f = f

        w_before = self.w
        y_k = complex(np.vdot(w_before, f))  # np.vdot conjugates w_before
        outputs[k] = y_k
        e_k = desired[k] - y_k
        errors[k] = e_k

        # Weight step.
        self.w = self.w + (2.0 * self.uw) * f * np.conj(e_k)

        phi = np.real(e_k * w_before)

        # Spread step with sigma^3 guard; sigma is clipped from below afterwards.
        self.sigma = np.maximum(
            self.sigma + (4.0 * self.us) * f * phi * dist_sq / np.maximum(self.sigma**3, floor),
            floor,
        )

        # Center step uses the just-updated sigma.
        denom_c = np.maximum(self.sigma**2, floor)
        self.vet = self.vet + (2.0 * self.ur) * (f[:, None] * phi[:, None]) * (u - self.vet) / denom_c[:, None]

        self._record_history()

    runtime_s = float(perf_counter() - tic)
    if verbose:
        print(f"[ComplexRBF] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "centers_last": self.vet.copy(),
            "sigma_last": self.sigma.copy(),
            "last_activation": None if newest_f is None else np.asarray(newest_f).copy(),
            "last_regressor": None if newest_u is None else np.asarray(newest_u).copy(),
            "input_dim": int(self.input_dim),
            "n_neurons": int(self.n_neurons),
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the CRBF adaptation loop over paired regressor/desired sequences.
Parameters
input_signal : array_like of complex
Either:
- Input signal x[k] with shape (N,) (will be flattened), in which
case tapped-delay regressors of length input_dim are built internally; or
- Regressor matrix U with shape (N, input_dim) (each row is u[k]).
desired_signal : array_like of complex
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal states in result.extra:
"centers_last", "sigma_last", "last_activation", and
"last_regressor" (plus "input_dim" and "n_neurons").
safe_eps : float, optional
Small positive constant used to guard denominators involving sigma.
Default is 1e-12.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar a priori output sequence, y[k] = w^H[k-1] f(u[k]).
- errors : ndarray of complex, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of complex
Coefficient history recorded by the base class (neuron weights w).
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True.
49class MultilayerPerceptron(AdaptiveFilter): 50 """ 51 Multilayer Perceptron (MLP) adaptive model with momentum (real-valued). 52 53 Online adaptation of a 2-hidden-layer feedforward neural network using a 54 stochastic-gradient update with momentum. The model is treated as an 55 adaptive nonlinear filter. 56 57 The forward pass is: 58 59 .. math:: 60 v_1[k] = W_1 u[k] - b_1, \\qquad y_1[k] = \\phi(v_1[k]), 61 62 .. math:: 63 v_2[k] = W_2 y_1[k] - b_2, \\qquad y_2[k] = \\phi(v_2[k]), 64 65 .. math:: 66 y[k] = w_3^T y_2[k] - b_3, 67 68 where ``\\phi`` is either ``tanh`` or ``sigmoid``. 69 70 Parameters 71 ---------- 72 n_neurons : int, optional 73 Number of neurons in each hidden layer. Default is 10. 74 input_dim : int, optional 75 Dimension of the regressor vector ``u[k]``. Default is 3. 76 If :meth:`optimize` is called with a 1D input signal, this must be 3 77 (see Notes). 78 step_size : float, optional 79 Gradient step size ``mu``. Default is 1e-2. 80 momentum : float, optional 81 Momentum factor in ``[0, 1)``. Default is 0.9. 82 activation : {"tanh", "sigmoid"}, optional 83 Activation function used in both hidden layers. Default is ``"tanh"``. 84 w_init : array_like of float, optional 85 Optional initialization for the output-layer weights ``w_3(0)``, with 86 shape ``(n_neurons,)``. If None, Xavier/Glorot-style uniform 87 initialization is used for all weights. 88 rng : numpy.random.Generator, optional 89 Random generator used for initialization. 90 91 Notes 92 ----- 93 Real-valued only 94 This implementation is restricted to real-valued signals and parameters 95 (``supports_complex=False``). The constraint is enforced via 96 ``@ensure_real_signals`` on :meth:`optimize`. 97 98 Input formats 99 The method :meth:`optimize` accepts two input formats: 100 101 1. **Regressor matrix** ``U`` with shape ``(N, input_dim)``: 102 each row is used directly as ``u[k]``. 103 104 2. 
**Scalar input signal** ``x[k]`` with shape ``(N,)``: 105 a 3-dimensional regressor is formed internally as 106 107 .. math:: 108 u[k] = [x[k],\\ d[k-1],\\ x[k-1]]^T, 109 110 therefore this mode requires ``input_dim = 3``. 111 112 Parameter update (as implemented) 113 Let the a priori error be ``e[k] = d[k] - y[k]``. This implementation 114 applies a momentum update of the form 115 116 .. math:: 117 \\theta[k+1] = \\theta[k] + \\Delta\\theta[k] + \\beta\\,\\Delta\\theta[k-1], 118 119 where ``\\beta`` is the momentum factor and ``\\Delta\\theta[k]`` is a 120 gradient step proportional to ``e[k]``. (See source for the exact 121 per-parameter expressions.) 122 123 Library conventions 124 - The base class ``filter_order`` is used only as a size indicator 125 (set to ``n_neurons - 1``). 126 - ``OptimizationResult.coefficients`` stores a *proxy* coefficient 127 history: the output-layer weight vector ``w3`` as tracked through 128 ``self.w`` for compatibility with the base API. 129 - Full parameter trajectories can be returned in ``result.extra`` when 130 ``return_internal_states=True``. 131 132 References 133 ---------- 134 .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical 135 Implementation*, 5th ed., Algorithm 11.4 (MLP adaptive structure; here 136 extended with momentum and selectable activations). 137 """ 138 139 supports_complex: bool = False 140 141 def __init__( 142 self, 143 n_neurons: int = 10, 144 input_dim: int = 3, 145 step_size: float = 0.01, 146 momentum: float = 0.9, 147 activation: str = "tanh", 148 w_init: Optional[ArrayLike] = None, 149 *, 150 rng: Optional[np.random.Generator] = None, 151 ) -> None: 152 n_neurons = int(n_neurons) 153 input_dim = int(input_dim) 154 if n_neurons <= 0: 155 raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.") 156 if input_dim <= 0: 157 raise ValueError(f"input_dim must be > 0. 
Got {input_dim}.") 158 if not (0.0 <= float(momentum) < 1.0): 159 raise ValueError(f"momentum must satisfy 0 <= momentum < 1. Got {momentum}.") 160 161 super().__init__(filter_order=n_neurons - 1, w_init=None) 162 163 self.n_neurons = n_neurons 164 self.input_dim = input_dim 165 self.step_size = float(step_size) 166 self.momentum = float(momentum) 167 168 if activation == "tanh": 169 self.act_func = _tanh 170 self.act_deriv = _dtanh 171 elif activation == "sigmoid": 172 self.act_func = _sigmoid 173 self.act_deriv = _dsigmoid 174 else: 175 raise ValueError("activation must be 'tanh' or 'sigmoid'.") 176 177 self._rng = rng if rng is not None else np.random.default_rng() 178 179 limit_w1 = float(np.sqrt(6.0 / (input_dim + n_neurons))) 180 limit_w2 = float(np.sqrt(6.0 / (n_neurons + n_neurons))) 181 limit_w3 = float(np.sqrt(6.0 / (n_neurons + 1))) 182 183 self.w1 = self._rng.uniform(-limit_w1, limit_w1, (n_neurons, input_dim)).astype(np.float64) 184 self.w2 = self._rng.uniform(-limit_w2, limit_w2, (n_neurons, n_neurons)).astype(np.float64) 185 self.w3 = self._rng.uniform(-limit_w3, limit_w3, (n_neurons,)).astype(np.float64) 186 187 if w_init is not None: 188 w3_0 = np.asarray(w_init, dtype=np.float64).reshape(-1) 189 if w3_0.size != n_neurons: 190 raise ValueError(f"w_init must have length {n_neurons}, got {w3_0.size}.") 191 self.w3 = w3_0 192 193 self.b1 = np.zeros(n_neurons, dtype=np.float64) 194 self.b2 = np.zeros(n_neurons, dtype=np.float64) 195 self.b3 = 0.0 196 197 self.prev_dw1 = np.zeros_like(self.w1) 198 self.prev_dw2 = np.zeros_like(self.w2) 199 self.prev_dw3 = np.zeros_like(self.w3) 200 self.prev_db1 = np.zeros_like(self.b1) 201 self.prev_db2 = np.zeros_like(self.b2) 202 self.prev_db3 = 0.0 203 204 self.w = self.w3.copy() 205 self.w_history = [] 206 self._record_history() 207 208 @staticmethod 209 def _as_regressor_matrix( 210 x_in: np.ndarray, d_in: np.ndarray, input_dim: int 211 ) -> Tuple[np.ndarray, bool]: 212 """ 213 Return (U, is_multidim). 
214 215 - If x_in is 2D: U = x_in 216 - If x_in is 1D: builds U[k]=[x[k], d[k-1], x[k-1]] and requires input_dim=3 217 """ 218 x_in = np.asarray(x_in, dtype=np.float64) 219 d_in = np.asarray(d_in, dtype=np.float64).ravel() 220 221 if x_in.ndim == 2: 222 if x_in.shape[0] != d_in.size: 223 raise ValueError(f"Shape mismatch: input({x_in.shape[0]}) and desired({d_in.size}).") 224 if x_in.shape[1] != input_dim: 225 raise ValueError(f"input_signal second dim must be input_dim={input_dim}. Got {x_in.shape}.") 226 return x_in.astype(np.float64, copy=False), True 227 228 if x_in.ndim == 1: 229 if input_dim != 3: 230 raise ValueError( 231 "When input_signal is 1D, this implementation uses u[k]=[x[k], d[k-1], x[k-1]] " 232 "so input_dim must be 3." 233 ) 234 if x_in.size != d_in.size: 235 raise ValueError(f"Shape mismatch: input({x_in.size}) and desired({d_in.size}).") 236 237 N = int(x_in.size) 238 U = np.zeros((N, 3), dtype=np.float64) 239 x_prev = 0.0 240 d_prev = 0.0 241 for k in range(N): 242 U[k, :] = np.array([x_in[k], d_prev, x_prev], dtype=np.float64) 243 x_prev = float(x_in[k]) 244 d_prev = float(d_in[k]) 245 return U, False 246 247 raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).") 248 249 @ensure_real_signals 250 def optimize( 251 self, 252 input_signal: Union[np.ndarray, list], 253 desired_signal: Union[np.ndarray, list], 254 verbose: bool = False, 255 return_internal_states: bool = False, 256 ) -> OptimizationResult: 257 """ 258 Executes the online MLP adaptation loop (with momentum). 259 260 Parameters 261 ---------- 262 input_signal : array_like of float 263 Either: 264 - regressor matrix ``U`` with shape ``(N, input_dim)``, or 265 - scalar input signal ``x[k]`` with shape ``(N,)`` (in which case the 266 regressor is built as ``u[k] = [x[k], d[k-1], x[k-1]]`` and 267 requires ``input_dim = 3``). 268 desired_signal : array_like of float 269 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 
270 verbose : bool, optional 271 If True, prints the total runtime after completion. 272 return_internal_states : bool, optional 273 If True, stores parameter snapshots in ``result.extra`` (may be memory 274 intensive for long runs). 275 276 Returns 277 ------- 278 OptimizationResult 279 Result object with fields: 280 - outputs : ndarray of float, shape ``(N,)`` 281 Scalar output sequence ``y[k]``. 282 - errors : ndarray of float, shape ``(N,)`` 283 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 284 - coefficients : ndarray of float 285 Proxy coefficient history recorded by the base class (tracks 286 the output-layer weights ``w3``). 287 - error_type : str 288 Set to ``"a_priori"``. 289 - extra : dict, optional 290 Present only if ``return_internal_states=True`` with: 291 - ``w1_hist`` : list of ndarray 292 Hidden-layer-1 weight snapshots. 293 - ``w2_hist`` : list of ndarray 294 Hidden-layer-2 weight snapshots. 295 - ``w3_hist`` : list of ndarray 296 Output-layer weight snapshots. 297 - ``b1_hist`` : list of ndarray 298 Bias-1 snapshots. 299 - ``b2_hist`` : list of ndarray 300 Bias-2 snapshots. 301 - ``b3_hist`` : list of float 302 Bias-3 snapshots. 303 - ``activation`` : str 304 Activation identifier (``"tanh"`` or ``"sigmoid"``). 
305 """ 306 t0 = perf_counter() 307 308 x_in = np.asarray(input_signal, dtype=np.float64) 309 d_in = np.asarray(desired_signal, dtype=np.float64).ravel() 310 311 U, _ = self._as_regressor_matrix(x_in, d_in, self.input_dim) 312 N = int(U.shape[0]) 313 314 outputs = np.zeros(N, dtype=np.float64) 315 errors = np.zeros(N, dtype=np.float64) 316 317 w1_hist: List[np.ndarray] = [] 318 w2_hist: List[np.ndarray] = [] 319 w3_hist: List[np.ndarray] = [] 320 b1_hist: List[np.ndarray] = [] 321 b2_hist: List[np.ndarray] = [] 322 b3_hist: List[float] = [] 323 324 for k in range(N): 325 u = U[k, :] 326 327 v1 = (self.w1 @ u) - self.b1 328 y1 = self.act_func(v1) 329 330 v2 = (self.w2 @ y1) - self.b2 331 y2 = self.act_func(v2) 332 333 y_k = float(np.dot(y2, self.w3) - self.b3) 334 outputs[k] = y_k 335 e_k = float(d_in[k] - y_k) 336 errors[k] = e_k 337 338 er_hid2 = e_k * self.w3 * self.act_deriv(v2) 339 er_hid1 = (self.w2.T @ er_hid2) * self.act_deriv(v1) 340 341 dw3 = (2.0 * self.step_size) * e_k * y2 342 self.w3 = self.w3 + dw3 + self.momentum * self.prev_dw3 343 self.prev_dw3 = dw3 344 345 db3 = (-2.0 * self.step_size) * e_k 346 self.b3 = float(self.b3 + db3 + self.momentum * self.prev_db3) 347 self.prev_db3 = db3 348 349 dw2 = (2.0 * self.step_size) * np.outer(er_hid2, y1) 350 self.w2 = self.w2 + dw2 + self.momentum * self.prev_dw2 351 self.prev_dw2 = dw2 352 353 db2 = (-2.0 * self.step_size) * er_hid2 354 self.b2 = self.b2 + db2 + self.momentum * self.prev_db2 355 self.prev_db2 = db2 356 357 dw1 = (2.0 * self.step_size) * np.outer(er_hid1, u) 358 self.w1 = self.w1 + dw1 + self.momentum * self.prev_dw1 359 self.prev_dw1 = dw1 360 361 db1 = (-2.0 * self.step_size) * er_hid1 362 self.b1 = self.b1 + db1 + self.momentum * self.prev_db1 363 self.prev_db1 = db1 364 365 self.w = self.w3.copy() 366 self._record_history() 367 368 if return_internal_states: 369 w1_hist.append(self.w1.copy()) 370 w2_hist.append(self.w2.copy()) 371 w3_hist.append(self.w3.copy()) 372 
b1_hist.append(self.b1.copy()) 373 b2_hist.append(self.b2.copy()) 374 b3_hist.append(float(self.b3)) 375 376 runtime_s = float(perf_counter() - t0) 377 if verbose: 378 print(f"[MultilayerPerceptron] Completed in {runtime_s * 1000:.03f} ms") 379 380 extra: Optional[Dict[str, Any]] = None 381 if return_internal_states: 382 extra = { 383 "w1_hist": w1_hist, 384 "w2_hist": w2_hist, 385 "w3_hist": w3_hist, 386 "b1_hist": b1_hist, 387 "b2_hist": b2_hist, 388 "b3_hist": b3_hist, 389 "activation": "tanh" if self.act_func is _tanh else "sigmoid", 390 } 391 392 return self._pack_results( 393 outputs=outputs, 394 errors=errors, 395 runtime_s=runtime_s, 396 error_type="a_priori", 397 extra=extra, 398 )
Multilayer Perceptron (MLP) adaptive model with momentum (real-valued).
Online adaptation of a 2-hidden-layer feedforward neural network using a stochastic-gradient update with momentum. The model is treated as an adaptive nonlinear filter.
The forward pass is:
$$v_1[k] = W_1 u[k] - b_1, \qquad y_1[k] = \phi(v_1[k]),$$
$$v_2[k] = W_2 y_1[k] - b_2, \qquad y_2[k] = \phi(v_2[k]),$$
$$y[k] = w_3^T y_2[k] - b_3,$$
where $\phi$ is either tanh or sigmoid.
Parameters
n_neurons : int, optional
Number of neurons in each hidden layer. Default is 10.
input_dim : int, optional
Dimension of the regressor vector u[k]. Default is 3.
If optimize() is called with a 1D input signal, this must be 3
(see Notes).
step_size : float, optional
Gradient step size mu. Default is 1e-2.
momentum : float, optional
Momentum factor in [0, 1). Default is 0.9.
activation : {"tanh", "sigmoid"}, optional
Activation function used in both hidden layers. Default is "tanh".
w_init : array_like of float, optional
Optional initialization for the output-layer weights w_3(0), with
shape (n_neurons,). If None, Xavier/Glorot-style uniform
initialization is used for all weights.
rng : numpy.random.Generator, optional
Random generator used for initialization.
Notes
Real-valued only
This implementation is restricted to real-valued signals and parameters
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
Input formats
The method optimize() accepts two input formats:
1. **Regressor matrix** ``U`` with shape ``(N, input_dim)``:
each row is used directly as ``u[k]``.
2. **Scalar input signal** ``x[k]`` with shape ``(N,)``:
a 3-dimensional regressor is formed internally as
$$u[k] = [x[k],\ d[k-1],\ x[k-1]]^T,$$
therefore this mode requires ``input_dim = 3``.
Parameter update (as implemented)
Let the a priori error be e[k] = d[k] - y[k]. This implementation
applies a momentum update of the form
$$\theta[k+1] = \theta[k] + \Delta\theta[k] + \beta\,\Delta\theta[k-1],$$
where $\beta$ is the momentum factor and $\Delta\theta[k]$ is a
gradient step proportional to ``e[k]``. (See source for the exact
per-parameter expressions.)
Library conventions
- The base class filter_order is used only as a size indicator
(set to n_neurons - 1).
- OptimizationResult.coefficients stores a proxy coefficient
history: the output-layer weight vector w3 as tracked through
self.w for compatibility with the base API.
- Full parameter trajectories can be returned in result.extra when
return_internal_states=True.
References
141 def __init__( 142 self, 143 n_neurons: int = 10, 144 input_dim: int = 3, 145 step_size: float = 0.01, 146 momentum: float = 0.9, 147 activation: str = "tanh", 148 w_init: Optional[ArrayLike] = None, 149 *, 150 rng: Optional[np.random.Generator] = None, 151 ) -> None: 152 n_neurons = int(n_neurons) 153 input_dim = int(input_dim) 154 if n_neurons <= 0: 155 raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.") 156 if input_dim <= 0: 157 raise ValueError(f"input_dim must be > 0. Got {input_dim}.") 158 if not (0.0 <= float(momentum) < 1.0): 159 raise ValueError(f"momentum must satisfy 0 <= momentum < 1. Got {momentum}.") 160 161 super().__init__(filter_order=n_neurons - 1, w_init=None) 162 163 self.n_neurons = n_neurons 164 self.input_dim = input_dim 165 self.step_size = float(step_size) 166 self.momentum = float(momentum) 167 168 if activation == "tanh": 169 self.act_func = _tanh 170 self.act_deriv = _dtanh 171 elif activation == "sigmoid": 172 self.act_func = _sigmoid 173 self.act_deriv = _dsigmoid 174 else: 175 raise ValueError("activation must be 'tanh' or 'sigmoid'.") 176 177 self._rng = rng if rng is not None else np.random.default_rng() 178 179 limit_w1 = float(np.sqrt(6.0 / (input_dim + n_neurons))) 180 limit_w2 = float(np.sqrt(6.0 / (n_neurons + n_neurons))) 181 limit_w3 = float(np.sqrt(6.0 / (n_neurons + 1))) 182 183 self.w1 = self._rng.uniform(-limit_w1, limit_w1, (n_neurons, input_dim)).astype(np.float64) 184 self.w2 = self._rng.uniform(-limit_w2, limit_w2, (n_neurons, n_neurons)).astype(np.float64) 185 self.w3 = self._rng.uniform(-limit_w3, limit_w3, (n_neurons,)).astype(np.float64) 186 187 if w_init is not None: 188 w3_0 = np.asarray(w_init, dtype=np.float64).reshape(-1) 189 if w3_0.size != n_neurons: 190 raise ValueError(f"w_init must have length {n_neurons}, got {w3_0.size}.") 191 self.w3 = w3_0 192 193 self.b1 = np.zeros(n_neurons, dtype=np.float64) 194 self.b2 = np.zeros(n_neurons, dtype=np.float64) 195 self.b3 = 0.0 196 197 self.prev_dw1 = 
np.zeros_like(self.w1) 198 self.prev_dw2 = np.zeros_like(self.w2) 199 self.prev_dw3 = np.zeros_like(self.w3) 200 self.prev_db1 = np.zeros_like(self.b1) 201 self.prev_db2 = np.zeros_like(self.b2) 202 self.prev_db3 = 0.0 203 204 self.w = self.w3.copy() 205 self.w_history = [] 206 self._record_history()
249 @ensure_real_signals 250 def optimize( 251 self, 252 input_signal: Union[np.ndarray, list], 253 desired_signal: Union[np.ndarray, list], 254 verbose: bool = False, 255 return_internal_states: bool = False, 256 ) -> OptimizationResult: 257 """ 258 Executes the online MLP adaptation loop (with momentum). 259 260 Parameters 261 ---------- 262 input_signal : array_like of float 263 Either: 264 - regressor matrix ``U`` with shape ``(N, input_dim)``, or 265 - scalar input signal ``x[k]`` with shape ``(N,)`` (in which case the 266 regressor is built as ``u[k] = [x[k], d[k-1], x[k-1]]`` and 267 requires ``input_dim = 3``). 268 desired_signal : array_like of float 269 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 270 verbose : bool, optional 271 If True, prints the total runtime after completion. 272 return_internal_states : bool, optional 273 If True, stores parameter snapshots in ``result.extra`` (may be memory 274 intensive for long runs). 275 276 Returns 277 ------- 278 OptimizationResult 279 Result object with fields: 280 - outputs : ndarray of float, shape ``(N,)`` 281 Scalar output sequence ``y[k]``. 282 - errors : ndarray of float, shape ``(N,)`` 283 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 284 - coefficients : ndarray of float 285 Proxy coefficient history recorded by the base class (tracks 286 the output-layer weights ``w3``). 287 - error_type : str 288 Set to ``"a_priori"``. 289 - extra : dict, optional 290 Present only if ``return_internal_states=True`` with: 291 - ``w1_hist`` : list of ndarray 292 Hidden-layer-1 weight snapshots. 293 - ``w2_hist`` : list of ndarray 294 Hidden-layer-2 weight snapshots. 295 - ``w3_hist`` : list of ndarray 296 Output-layer weight snapshots. 297 - ``b1_hist`` : list of ndarray 298 Bias-1 snapshots. 299 - ``b2_hist`` : list of ndarray 300 Bias-2 snapshots. 301 - ``b3_hist`` : list of float 302 Bias-3 snapshots. 303 - ``activation`` : str 304 Activation identifier (``"tanh"`` or ``"sigmoid"``). 
305 """ 306 t0 = perf_counter() 307 308 x_in = np.asarray(input_signal, dtype=np.float64) 309 d_in = np.asarray(desired_signal, dtype=np.float64).ravel() 310 311 U, _ = self._as_regressor_matrix(x_in, d_in, self.input_dim) 312 N = int(U.shape[0]) 313 314 outputs = np.zeros(N, dtype=np.float64) 315 errors = np.zeros(N, dtype=np.float64) 316 317 w1_hist: List[np.ndarray] = [] 318 w2_hist: List[np.ndarray] = [] 319 w3_hist: List[np.ndarray] = [] 320 b1_hist: List[np.ndarray] = [] 321 b2_hist: List[np.ndarray] = [] 322 b3_hist: List[float] = [] 323 324 for k in range(N): 325 u = U[k, :] 326 327 v1 = (self.w1 @ u) - self.b1 328 y1 = self.act_func(v1) 329 330 v2 = (self.w2 @ y1) - self.b2 331 y2 = self.act_func(v2) 332 333 y_k = float(np.dot(y2, self.w3) - self.b3) 334 outputs[k] = y_k 335 e_k = float(d_in[k] - y_k) 336 errors[k] = e_k 337 338 er_hid2 = e_k * self.w3 * self.act_deriv(v2) 339 er_hid1 = (self.w2.T @ er_hid2) * self.act_deriv(v1) 340 341 dw3 = (2.0 * self.step_size) * e_k * y2 342 self.w3 = self.w3 + dw3 + self.momentum * self.prev_dw3 343 self.prev_dw3 = dw3 344 345 db3 = (-2.0 * self.step_size) * e_k 346 self.b3 = float(self.b3 + db3 + self.momentum * self.prev_db3) 347 self.prev_db3 = db3 348 349 dw2 = (2.0 * self.step_size) * np.outer(er_hid2, y1) 350 self.w2 = self.w2 + dw2 + self.momentum * self.prev_dw2 351 self.prev_dw2 = dw2 352 353 db2 = (-2.0 * self.step_size) * er_hid2 354 self.b2 = self.b2 + db2 + self.momentum * self.prev_db2 355 self.prev_db2 = db2 356 357 dw1 = (2.0 * self.step_size) * np.outer(er_hid1, u) 358 self.w1 = self.w1 + dw1 + self.momentum * self.prev_dw1 359 self.prev_dw1 = dw1 360 361 db1 = (-2.0 * self.step_size) * er_hid1 362 self.b1 = self.b1 + db1 + self.momentum * self.prev_db1 363 self.prev_db1 = db1 364 365 self.w = self.w3.copy() 366 self._record_history() 367 368 if return_internal_states: 369 w1_hist.append(self.w1.copy()) 370 w2_hist.append(self.w2.copy()) 371 w3_hist.append(self.w3.copy()) 372 
b1_hist.append(self.b1.copy()) 373 b2_hist.append(self.b2.copy()) 374 b3_hist.append(float(self.b3)) 375 376 runtime_s = float(perf_counter() - t0) 377 if verbose: 378 print(f"[MultilayerPerceptron] Completed in {runtime_s * 1000:.03f} ms") 379 380 extra: Optional[Dict[str, Any]] = None 381 if return_internal_states: 382 extra = { 383 "w1_hist": w1_hist, 384 "w2_hist": w2_hist, 385 "w3_hist": w3_hist, 386 "b1_hist": b1_hist, 387 "b2_hist": b2_hist, 388 "b3_hist": b3_hist, 389 "activation": "tanh" if self.act_func is _tanh else "sigmoid", 390 } 391 392 return self._pack_results( 393 outputs=outputs, 394 errors=errors, 395 runtime_s=runtime_s, 396 error_type="a_priori", 397 extra=extra, 398 )
Executes the online MLP adaptation loop (with momentum).
Parameters
input_signal : array_like of float
Either:
- regressor matrix U with shape (N, input_dim), or
- scalar input signal x[k] with shape (N,) (in which case the
regressor is built as u[k] = [x[k], d[k-1], x[k-1]] and
requires input_dim = 3).
desired_signal : array_like of float
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, stores parameter snapshots in result.extra (may be memory
intensive for long runs).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar output sequence y[k].
- errors : ndarray of float, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of float
Proxy coefficient history recorded by the base class (tracks
the output-layer weights w3).
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True with:
- w1_hist : list of ndarray
Hidden-layer-1 weight snapshots.
- w2_hist : list of ndarray
Hidden-layer-2 weight snapshots.
- w3_hist : list of ndarray
Output-layer weight snapshots.
- b1_hist : list of ndarray
Bias-1 snapshots.
- b2_hist : list of ndarray
Bias-2 snapshots.
- b3_hist : list of float
Bias-3 snapshots.
- activation : str
Activation identifier ("tanh" or "sigmoid").
class RBF(AdaptiveFilter):
    """
    Radial Basis Function (RBF) adaptive model (real-valued).

    Online adaptation of an RBF network with Gaussian basis functions,
    following Diniz (Alg. 11.5).  Each iteration updates the output weights
    ``w``, the centers ``c_i`` (stored in ``vet``) and the spreads
    ``sigma_i`` (stored in ``sigma``).

    Parameters
    ----------
    n_neurons : int
        Number of RBF neurons (basis functions).
    input_dim : int
        Dimension of the regressor vector ``u[k]``.  When :meth:`optimize`
        receives a 1D signal this is interpreted as the tap length.
    ur : float, optional
        Step size for center updates. Default is 1e-2.
    uw : float, optional
        Step size for output-weight updates. Default is 1e-2.
    us : float, optional
        Step size for spread (sigma) updates. Default is 1e-2.
    w_init : array_like of float, optional
        Initial output-weight vector ``w(0)`` with shape ``(n_neurons,)``;
        standard-normal initialization when None.
    sigma_init : float, optional
        Initial spread for all neurons (must be positive). Default is 1.0.
    centers_init_scale : float, optional
        Scale of the random center initialization. Default is 0.5.
    rng : numpy.random.Generator, optional
        Random generator used for reproducible initialization.
    safe_eps : float, optional
        Small positive guard for the ``sigma**2`` / ``sigma**3`` denominators.
        Default is 1e-12.

    Notes
    -----
    Real-valued only (``supports_complex=False``); enforced via
    ``@ensure_real_signals`` on :meth:`optimize`.

    The model output is ``y[k] = sum_i w_i exp(-||u[k] - c_i||^2 / sigma_i^2)``.
    :meth:`optimize` accepts either a regressor matrix ``(N, input_dim)`` or a
    1D signal from which tapped-delay regressors are built.

    ``OptimizationResult.coefficients`` stores the output-weight history;
    centers and spreads are available via ``result.extra`` when
    ``return_internal_states=True``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 11.5.
    """

    supports_complex: bool = False

    def __init__(
        self,
        n_neurons: int,
        input_dim: int,
        ur: float = 0.01,
        uw: float = 0.01,
        us: float = 0.01,
        w_init: Optional[ArrayLike] = None,
        *,
        sigma_init: float = 1.0,
        centers_init_scale: float = 0.5,
        rng: Optional[np.random.Generator] = None,
        safe_eps: float = 1e-12,
    ) -> None:
        n_neurons = int(n_neurons)
        input_dim = int(input_dim)
        if n_neurons <= 0:
            raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.")
        if input_dim <= 0:
            raise ValueError(f"input_dim must be > 0. Got {input_dim}.")
        if float(sigma_init) <= 0.0:
            raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.")

        super().__init__(filter_order=n_neurons - 1, w_init=None)

        self.n_neurons = n_neurons
        self.input_dim = input_dim
        self.ur = float(ur)
        self.uw = float(uw)
        self.us = float(us)

        self._safe_eps = float(safe_eps)
        self._rng = rng if rng is not None else np.random.default_rng()

        # Output weights are drawn first, then the centers, so the rng stream
        # matches reproducibly for a given seed.
        if w_init is None:
            self.w = self._rng.standard_normal(n_neurons).astype(np.float64)
        else:
            w_start = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if w_start.size != n_neurons:
                raise ValueError(f"w_init must have length {n_neurons}, got {w_start.size}.")
            self.w = w_start

        self.vet = (
            float(centers_init_scale) * self._rng.standard_normal((n_neurons, input_dim))
        ).astype(np.float64)
        self.sigma = float(sigma_init) * np.ones(n_neurons, dtype=np.float64)

        self.w_history = []
        self._record_history()

    @staticmethod
    def _build_regressors_1d(x: np.ndarray, input_dim: int) -> np.ndarray:
        """Build tapped-delay regressors u[k]=[x[k], x[k-1], ..., x[k-input_dim+1]]."""
        sig = np.asarray(x, dtype=np.float64).ravel()
        taps = int(input_dim)
        # Zero prehistory: pad (taps-1) zeros in front, then slide a window.
        padded = np.concatenate((np.zeros(taps - 1, dtype=np.float64), sig))
        rows = [padded[k : k + taps][::-1] for k in range(int(sig.size))]
        return np.array(rows, dtype=np.float64)

    @staticmethod
    def _as_regressor_matrix(x_in: np.ndarray, input_dim: int) -> Tuple[np.ndarray, int]:
        """Return (U, N) from either an (N, input_dim) matrix or an (N,) signal."""
        arr = np.asarray(x_in, dtype=np.float64)
        if arr.ndim == 2:
            if arr.shape[1] != input_dim:
                raise ValueError(f"input_signal must have shape (N,{input_dim}). Got {arr.shape}.")
            return arr.astype(np.float64, copy=False), int(arr.shape[0])
        if arr.ndim == 1:
            U = RBF._build_regressors_1d(arr, input_dim=input_dim)
            return U, int(U.shape[0])
        raise ValueError("input_signal must be 1D (signal) or 2D (regressor matrix).")

    @ensure_real_signals
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the RBF online adaptation loop.

        Parameters
        ----------
        input_signal : array_like of float
            Regressor matrix ``(N, input_dim)`` or scalar signal ``(N,)``
            (tapped-delay regressors are built internally).
        desired_signal : array_like of float
            Desired sequence ``d[k]``; flattened to shape ``(N,)``.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, ``result.extra`` holds ``centers_last``, ``sigma_last``
            and ``last_phi``.

        Returns
        -------
        OptimizationResult
            ``outputs`` (``y[k] = w^T phi(u[k])``), ``errors`` (a priori
            ``e[k] = d[k] - y[k]``), ``coefficients`` (output-weight history)
            and ``error_type`` = ``"a_priori"``.
        """
        tic = perf_counter()

        x_arr = np.asarray(input_signal, dtype=np.float64)
        d_arr = np.asarray(desired_signal, dtype=np.float64).ravel()

        U, n_samples = self._as_regressor_matrix(x_arr, input_dim=self.input_dim)
        if d_arr.size != n_samples:
            raise ValueError(f"Shape mismatch: input({n_samples}) and desired({d_arr.size}).")

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        phi: Optional[np.ndarray] = None  # last activation vector (for extra)

        for k in range(n_samples):
            u = U[k, :]

            # Gaussian activations: phi_i = exp(-||u - c_i||^2 / sigma_i^2).
            offset = u[None, :] - self.vet
            sq_dist = np.sum(offset * offset, axis=1)
            phi = np.exp(-sq_dist / ((self.sigma * self.sigma) + self._safe_eps))

            y_k = float(np.dot(self.w, phi))
            outputs[k] = y_k
            e_k = float(d_arr[k] - y_k)
            errors[k] = e_k

            # NOTE(review): the spread/center gradients below use the already
            # updated output weights (w is overwritten first), mirroring the
            # original implementation — confirm against Diniz Alg. 11.5
            # whether the pre-update weights are intended.
            self.w = self.w + (2.0 * self.uw) * e_k * phi

            guarded = np.maximum(self.sigma, self._safe_eps)
            self.sigma = self.sigma + (2.0 * self.us) * e_k * phi * self.w * sq_dist / (guarded**3)

            denom = (guarded**2) + self._safe_eps
            # Per-neuron center step, vectorized over the rows of ``vet``.
            gain = (2.0 * self.ur) * e_k * (phi * self.w / denom)
            self.vet = self.vet + gain[:, None] * offset

            self._record_history()

        runtime_s = float(perf_counter() - tic)
        if verbose:
            print(f"[RBF] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "centers_last": self.vet.copy(),
                "sigma_last": self.sigma.copy(),
                "last_phi": None if phi is None else phi.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Radial Basis Function (RBF) adaptive model (real-valued).
Online adaptation of an RBF network with Gaussian basis functions, following Diniz (Alg. 11.5). The algorithm updates:
- output weights `w` (one weight per neuron),
- centers `c_i` (stored in `vet`),
- spreads `sigma_i` (stored in `sigma`).
Parameters
n_neurons : int
Number of RBF neurons (basis functions).
input_dim : int
Dimension of the regressor vector u[k]. If optimize() is called
with a 1D input signal, this is interpreted as the tap length.
ur : float, optional
Step size for center updates. Default is 1e-2.
uw : float, optional
Step size for output-weight updates. Default is 1e-2.
us : float, optional
Step size for spread (sigma) updates. Default is 1e-2.
w_init : array_like of float, optional
Initial output-weight vector w(0) with shape (n_neurons,).
If None, initializes from a standard normal distribution.
sigma_init : float, optional
Initial spread value used for all neurons (must be positive). Default is 1.0.
centers_init_scale : float, optional
Scale used for random initialization of centers. Default is 0.5.
rng : numpy.random.Generator, optional
Random generator used for reproducible initialization.
safe_eps : float, optional
Small positive constant used to guard denominators (e.g., sigma^2 and
sigma^3). Default is 1e-12.
Notes
Real-valued only
This implementation is restricted to real-valued signals and parameters
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
Model
For a regressor vector $u[k] \in \mathbb{R}^{D}$, define Gaussian basis
functions:
$$\phi_i(u[k]) = \exp\left(-\frac{\|u[k] - c_i\|^2}{\sigma_i^2}\right),$$
where ``c_i`` is the center and ``sigma_i > 0`` is the spread of neuron ``i``.
The network output is
$$y[k] = \sum_{i=1}^{Q} w_i\, \phi_i(u[k]) = w^T \phi(u[k]),$$
where ``Q = n_neurons`` and $\phi(u[k]) \in \mathbb{R}^{Q}$ stacks all
activations.
Input formats
The method optimize() accepts two input formats:
1. **Regressor matrix** ``U`` with shape ``(N, input_dim)``:
each row is used directly as ``u[k]``.
2. **Scalar input signal** ``x[k]`` with shape ``(N,)``:
tapped-delay regressors of length ``input_dim`` are built as
$$u[k] = [x[k], x[k-1], \ldots, x[k-input\_dim+1]]^T.$$
Library conventions
- OptimizationResult.coefficients stores the history of the output
weights w (the neuron output layer).
- Centers and spreads are returned via result.extra when
return_internal_states=True.
References
113 def __init__( 114 self, 115 n_neurons: int, 116 input_dim: int, 117 ur: float = 0.01, 118 uw: float = 0.01, 119 us: float = 0.01, 120 w_init: Optional[ArrayLike] = None, 121 *, 122 sigma_init: float = 1.0, 123 centers_init_scale: float = 0.5, 124 rng: Optional[np.random.Generator] = None, 125 safe_eps: float = 1e-12, 126 ) -> None: 127 n_neurons = int(n_neurons) 128 input_dim = int(input_dim) 129 if n_neurons <= 0: 130 raise ValueError(f"n_neurons must be > 0. Got {n_neurons}.") 131 if input_dim <= 0: 132 raise ValueError(f"input_dim must be > 0. Got {input_dim}.") 133 if float(sigma_init) <= 0.0: 134 raise ValueError(f"sigma_init must be > 0. Got {sigma_init}.") 135 136 super().__init__(filter_order=n_neurons - 1, w_init=None) 137 138 self.n_neurons = n_neurons 139 self.input_dim = input_dim 140 self.ur = float(ur) 141 self.uw = float(uw) 142 self.us = float(us) 143 144 self._safe_eps = float(safe_eps) 145 self._rng = rng if rng is not None else np.random.default_rng() 146 147 if w_init is None: 148 self.w = self._rng.standard_normal(n_neurons).astype(np.float64) 149 else: 150 w0 = np.asarray(w_init, dtype=np.float64).reshape(-1) 151 if w0.size != n_neurons: 152 raise ValueError(f"w_init must have length {n_neurons}, got {w0.size}.") 153 self.w = w0 154 155 self.vet = (float(centers_init_scale) * self._rng.standard_normal((n_neurons, input_dim))).astype( 156 np.float64 157 ) 158 self.sigma = np.ones(n_neurons, dtype=np.float64) * float(sigma_init) 159 160 self.w_history = [] 161 self._record_history()
186 @ensure_real_signals 187 def optimize( 188 self, 189 input_signal: Union[np.ndarray, list], 190 desired_signal: Union[np.ndarray, list], 191 verbose: bool = False, 192 return_internal_states: bool = False, 193 ) -> OptimizationResult: 194 """ 195 Executes the RBF online adaptation loop. 196 197 Parameters 198 ---------- 199 input_signal : array_like of float 200 Either: 201 - regressor matrix ``U`` with shape ``(N, input_dim)``, or 202 - scalar input signal ``x[k]`` with shape ``(N,)`` (tapped-delay 203 regressors are built internally). 204 desired_signal : array_like of float 205 Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened). 206 verbose : bool, optional 207 If True, prints the total runtime after completion. 208 return_internal_states : bool, optional 209 If True, includes final centers/spreads and last activation vector 210 in ``result.extra``. 211 212 Returns 213 ------- 214 OptimizationResult 215 Result object with fields: 216 - outputs : ndarray of float, shape ``(N,)`` 217 Scalar output sequence ``y[k] = w^T \\phi(u[k])``. 218 - errors : ndarray of float, shape ``(N,)`` 219 Scalar a priori error sequence, ``e[k] = d[k] - y[k]``. 220 - coefficients : ndarray of float 221 Output-weight history recorded by the base class. 222 - error_type : str 223 Set to ``"a_priori"``. 224 - extra : dict, optional 225 Present only if ``return_internal_states=True`` with: 226 - ``centers_last`` : ndarray of float 227 Final centers array (shape ``(n_neurons, input_dim)``). 228 - ``sigma_last`` : ndarray of float 229 Final spreads vector (shape ``(n_neurons,)``). 230 - ``last_phi`` : ndarray of float 231 Last basis-function activation vector ``\\phi(u[k])`` (shape ``(n_neurons,)``). 
232 """ 233 t0 = perf_counter() 234 235 x_in = np.asarray(input_signal, dtype=np.float64) 236 d_in = np.asarray(desired_signal, dtype=np.float64).ravel() 237 238 U, N = self._as_regressor_matrix(x_in, input_dim=self.input_dim) 239 if d_in.size != N: 240 raise ValueError(f"Shape mismatch: input({N}) and desired({d_in.size}).") 241 242 outputs = np.zeros(N, dtype=np.float64) 243 errors = np.zeros(N, dtype=np.float64) 244 245 last_phi: Optional[np.ndarray] = None 246 247 for k in range(N): 248 u = U[k, :] 249 250 diff = u[None, :] - self.vet 251 dis_sq = np.sum(diff * diff, axis=1) 252 253 sigma_sq = (self.sigma * self.sigma) + self._safe_eps 254 phi = np.exp(-dis_sq / sigma_sq) 255 last_phi = phi 256 257 y_k = float(np.dot(self.w, phi)) 258 outputs[k] = y_k 259 260 e_k = float(d_in[k] - y_k) 261 errors[k] = e_k 262 263 self.w = self.w + (2.0 * self.uw) * e_k * phi 264 265 sigma_cu = np.maximum(self.sigma, self._safe_eps) 266 self.sigma = self.sigma + (2.0 * self.us) * e_k * phi * self.w * dis_sq / (sigma_cu**3) 267 268 denom_c = (sigma_cu**2) + self._safe_eps 269 for p in range(self.n_neurons): 270 self.vet[p] = self.vet[p] + (2.0 * self.ur) * phi[p] * e_k * self.w[p] * (u - self.vet[p]) / denom_c[p] 271 272 self._record_history() 273 274 runtime_s = float(perf_counter() - t0) 275 if verbose: 276 print(f"[RBF] Completed in {runtime_s * 1000:.03f} ms") 277 278 extra: Optional[Dict[str, Any]] = None 279 if return_internal_states: 280 extra = { 281 "centers_last": self.vet.copy(), 282 "sigma_last": self.sigma.copy(), 283 "last_phi": None if last_phi is None else last_phi.copy(), 284 } 285 286 return self._pack_results( 287 outputs=outputs, 288 errors=errors, 289 runtime_s=runtime_s, 290 error_type="a_priori", 291 extra=extra, 292 )
Executes the RBF online adaptation loop.
Parameters
input_signal : array_like of float
Either:
- regressor matrix U with shape (N, input_dim), or
- scalar input signal x[k] with shape (N,) (tapped-delay
regressors are built internally).
desired_signal : array_like of float
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes final centers/spreads and last activation vector
in result.extra.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar output sequence y[k] = w^T \phi(u[k]).
- errors : ndarray of float, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of float
Output-weight history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True with:
- centers_last : ndarray of float
Final centers array (shape (n_neurons, input_dim)).
- sigma_last : ndarray of float
Final spreads vector (shape (n_neurons,)).
- last_phi : ndarray of float
Last basis-function activation vector \phi(u[k]) (shape (n_neurons,)).
class VolterraLMS(AdaptiveFilter):
    """
    Second-order Volterra LMS adaptive filter (real-valued).

    Volterra LMS (Diniz, Alg. 11.1) using a second-order Volterra expansion.
    The adaptive model augments a linear tapped-delay regressor with all
    quadratic products (including squares) and performs an LMS-type update on
    the expanded coefficient vector.

    Parameters
    ----------
    memory : int, optional
        Linear memory length ``L``. The linear delay line is
        ``[x[k], x[k-1], ..., x[k-L+1]]``. Default is 3.
    step_size : float or array_like of float, optional
        Step size ``mu``: either a scalar (same step for all coefficients) or
        a vector with shape ``(n_coeffs,)`` for per-term step scaling.
        Default is 1e-2.
    w_init : array_like of float, optional
        Initial coefficient vector ``w(0)`` with shape ``(n_coeffs,)``. If
        None, initializes with zeros.
    safe_eps : float, optional
        Small positive constant kept for API consistency across the library.
        (Not used directly by this implementation.) Default is 1e-12.

    Notes
    -----
    Real-valued only (``supports_complex=False``); the constraint is enforced
    via ``@ensure_real_signals`` on :meth:`optimize`.

    The regressor stacks the linear delay line with the unique quadratic
    products ``x_lin[i] x_lin[j]`` for ``0 <= i <= j <= L-1``, so

    .. math::
        n_{coeffs} = L + \\frac{L(L+1)}{2}.

    With ``y[k] = w^T[k] u[k]`` and ``e[k] = d[k] - y[k]``, the update is

    .. math::
        w[k+1] = w[k] + 2\\mu\\, e[k]\\, u[k],

    where ``mu`` may be scalar or element-wise (vector step).

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 11.1.
    """

    supports_complex: bool = False

    def __init__(
        self,
        memory: int = 3,
        step_size: Union[float, np.ndarray, list] = 1e-2,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        memory = int(memory)
        if memory <= 0:
            raise ValueError(f"memory must be > 0. Got {memory}.")

        self.memory: int = memory
        # L linear taps plus L(L+1)/2 unique quadratic products.
        self.n_coeffs: int = memory + (memory * (memory + 1)) // 2
        self._safe_eps: float = float(safe_eps)

        # Precomputed upper-triangle index pairs (i <= j); their row-major
        # ordering matches the documented regressor layout
        # (i increasing, j from i to L-1).
        self._quad_i, self._quad_j = np.triu_indices(memory)

        super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)

        # Tuple accepted in addition to list/ndarray (backward-compatible
        # generalization of the vector-step input types).
        if isinstance(step_size, (list, tuple, np.ndarray)):
            step_vec = np.asarray(step_size, dtype=np.float64).reshape(-1)
            if step_vec.size != self.n_coeffs:
                raise ValueError(
                    f"step vector must have length {self.n_coeffs}, got {step_vec.size}."
                )
            self.step_size: Union[float, np.ndarray] = step_vec
        else:
            self.step_size = float(step_size)

        self.w = np.asarray(self.w, dtype=np.float64)

        # Restart coefficient history with the initial weights.
        self.w_history = []
        self._record_history()

    def _create_volterra_regressor(self, x_lin: np.ndarray) -> np.ndarray:
        """
        Construct the second-order Volterra regressor from a linear delay line.

        Parameters
        ----------
        x_lin : ndarray of float
            Linear delay line with shape ``(L,)`` ordered as
            ``[x[k], x[k-1], ..., x[k-L+1]]``.

        Returns
        -------
        ndarray of float
            Regressor ``u[k]`` with shape ``(n_coeffs,)``: linear terms
            followed by the quadratic terms for ``i <= j``.
        """
        x_lin = np.asarray(x_lin, dtype=np.float64).reshape(-1)
        if x_lin.size != self.memory:
            raise ValueError(
                f"x_lin must have length {self.memory}, got {x_lin.size}."
            )

        # Vectorized quadratic block: same values and same ordering as the
        # previous nested Python loops, computed at C speed.
        quad = x_lin[self._quad_i] * x_lin[self._quad_j]

        return np.concatenate([x_lin, quad], axis=0)

    @ensure_real_signals
    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Union[np.ndarray, list],
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Executes the Volterra LMS adaptation loop over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of float
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : array_like of float
            Desired sequence ``d[k]`` with shape ``(N,)`` (will be flattened).
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes ``"last_regressor"``, ``"memory"``, and
            ``"n_coeffs"`` in ``result.extra``.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : a priori output sequence, ``y[k] = w^T[k] u[k]``.
            - errors : a priori error sequence, ``e[k] = d[k] - y[k]``.
            - coefficients : coefficient history recorded by the base class.
            - error_type : ``"a_priori"``.
            - extra : present only if ``return_internal_states=True``.
        """
        t0 = perf_counter()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        if x.size != d.size:
            raise ValueError(f"Inconsistent lengths: input({x.size}) != desired({d.size})")
        n_samples = int(x.size)

        outputs = np.zeros(n_samples, dtype=np.float64)
        errors = np.zeros(n_samples, dtype=np.float64)

        L = int(self.memory)
        # Zero pre-padding so the first L-1 regressors use x[k]=0 history.
        x_padded = np.zeros(n_samples + (L - 1), dtype=np.float64)
        x_padded[L - 1:] = x

        # Loop-invariant step factor hoisted out of the sample loop
        # (scalar or per-coefficient vector; numerics unchanged).
        if isinstance(self.step_size, np.ndarray):
            two_mu: Union[float, np.ndarray] = 2.0 * self.step_size
        else:
            two_mu = 2.0 * float(self.step_size)

        last_u: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Most-recent-first delay line: [x[k], x[k-1], ..., x[k-L+1]].
            x_lin = x_padded[k: k + L][::-1]
            u = self._create_volterra_regressor(x_lin)
            last_u = u

            y_k = float(np.dot(self.w, u))
            outputs[k] = y_k

            e_k = float(d[k] - y_k)
            errors[k] = e_k

            # w[k+1] = w[k] + 2 mu e[k] u[k]
            self.w = self.w + (two_mu * e_k) * u

            self._record_history()

        runtime_s = float(perf_counter() - t0)
        if verbose:
            print(f"[VolterraLMS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "last_regressor": None if last_u is None else last_u.copy(),
                "memory": int(self.memory),
                "n_coeffs": int(self.n_coeffs),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Second-order Volterra LMS adaptive filter (real-valued).
Volterra LMS (Diniz, Alg. 11.1) using a second-order Volterra expansion. The adaptive model augments a linear tapped-delay regressor with all quadratic products (including squares) and performs an LMS-type update on the expanded coefficient vector.
Parameters
memory : int, optional
Linear memory length L. The linear delay line is
[x[k], x[k-1], ..., x[k-L+1]]. Default is 3.
step_size : float or array_like of float, optional
Step size mu. Can be either:
- a scalar (same step for all coefficients), or
- a vector with shape (n_coeffs,) for per-term step scaling.
Default is 1e-2.
w_init : array_like of float, optional
Initial coefficient vector w(0) with shape (n_coeffs,). If None,
initializes with zeros.
safe_eps : float, optional
Small positive constant kept for API consistency across the library.
(Not used directly by this implementation.) Default is 1e-12.
Notes
Real-valued only
This implementation is restricted to real-valued signals and coefficients
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
Volterra regressor (as implemented) Let the linear delay line be
$$x_{lin}[k] = [x[k], x[k-1], \ldots, x[k-L+1]]^T \in \mathbb{R}^{L}.$$
The second-order Volterra regressor is constructed as
$$u[k] =
\begin{bmatrix} x_{lin}[k] \\ \mathrm{vec}\bigl(x_{lin}[k] x_{lin}^T[k]\bigr)_{i \le j} \end{bmatrix} \in \mathbb{R}^{n_{coeffs}},$$
where the quadratic block contains all products ``x_{lin,i}[k] x_{lin,j}[k]``
for ``0 \le i \le j \le L-1`` (unique terms only).
The number of coefficients is therefore
$$n_{coeffs} = L + \frac{L(L+1)}{2}.$$
LMS recursion (a priori) With
$$y[k] = w^T[k] u[k], \qquad e[k] = d[k] - y[k],$$
the update implemented here is
$$w[k+1] = w[k] + 2\mu\, e[k] \, u[k],$$
where ``\mu`` may be scalar or element-wise (vector step).
Implementation details
- The coefficient vector self.w stores the full Volterra parameter
vector (linear + quadratic) and is recorded by the base class.
- The quadratic term ordering matches the nested loops used in
_create_volterra_regressor() (i increasing, j from i to L-1).
References
def __init__(
    self,
    memory: int = 3,
    step_size: Union[float, np.ndarray, list] = 1e-2,
    w_init: Optional[ArrayLike] = None,
    *,
    safe_eps: float = 1e-12,
) -> None:
    """Set up the second-order Volterra LMS filter state.

    Parameters
    ----------
    memory : int, optional
        Linear memory length ``L``; yields ``n_coeffs = L + L(L+1)/2``.
    step_size : float or array_like of float, optional
        Scalar step, or a vector of length ``n_coeffs`` for per-term scaling.
    w_init : array_like of float, optional
        Initial coefficient vector (length ``n_coeffs``); zeros when None.
    safe_eps : float, optional
        Small constant kept for API consistency (not used by this class).
    """
    memory = int(memory)
    if memory <= 0:
        raise ValueError(f"memory must be > 0. Got {memory}.")

    self.memory: int = memory
    # Linear taps plus all unique quadratic products.
    self.n_coeffs: int = memory + (memory * (memory + 1)) // 2
    self._safe_eps: float = float(safe_eps)

    super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)

    if isinstance(step_size, (np.ndarray, list)):
        mu_vec = np.asarray(step_size, dtype=np.float64).reshape(-1)
        if mu_vec.size != self.n_coeffs:
            raise ValueError(
                f"step vector must have length {self.n_coeffs}, got {mu_vec.size}."
            )
        self.step_size: Union[float, np.ndarray] = mu_vec
    else:
        self.step_size = float(step_size)

    self.w = np.asarray(self.w, dtype=np.float64)

    # Reset and seed the coefficient history with the initial weights.
    self.w_history = []
    self._record_history()
@ensure_real_signals
def optimize(
    self,
    input_signal: Union[np.ndarray, list],
    desired_signal: Union[np.ndarray, list],
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """Run the Volterra LMS recursion over paired input/desired sequences.

    Parameters
    ----------
    input_signal : array_like of float
        Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally).
    desired_signal : array_like of float
        Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally).
    verbose : bool, optional
        When True, prints the total runtime after the loop finishes.
    return_internal_states : bool, optional
        When True, ``result.extra`` carries ``"last_regressor"``,
        ``"memory"``, and ``"n_coeffs"``.

    Returns
    -------
    OptimizationResult
        ``outputs`` holds the a priori ``y[k] = w^T[k] u[k]``, ``errors``
        holds ``e[k] = d[k] - y[k]``, ``coefficients`` comes from the
        base-class history, ``error_type`` is ``"a_priori"``, and ``extra``
        is present only on request.
    """
    start = perf_counter()

    x_seq = np.asarray(input_signal, dtype=np.float64).ravel()
    d_seq = np.asarray(desired_signal, dtype=np.float64).ravel()

    if x_seq.size != d_seq.size:
        raise ValueError(f"Inconsistent lengths: input({x_seq.size}) != desired({d_seq.size})")
    n = int(x_seq.size)

    y_out = np.zeros(n, dtype=np.float64)
    e_out = np.zeros(n, dtype=np.float64)

    taps = int(self.memory)
    # Zero pre-padding supplies the x[k]=0 history for the first samples.
    padded = np.zeros(n + (taps - 1), dtype=np.float64)
    padded[taps - 1:] = x_seq

    # Step factor is loop-invariant: scalar or per-coefficient vector.
    if isinstance(self.step_size, np.ndarray):
        two_mu: Union[float, np.ndarray] = 2.0 * self.step_size
    else:
        two_mu = 2.0 * float(self.step_size)

    u_last: Optional[np.ndarray] = None

    for k in range(n):
        # Most-recent-first delay line, then the Volterra expansion.
        delay_line = padded[k: k + taps][::-1]
        u = self._create_volterra_regressor(delay_line)
        u_last = u

        y_k = float(np.dot(self.w, u))
        y_out[k] = y_k

        e_k = float(d_seq[k] - y_k)
        e_out[k] = e_k

        # w[k+1] = w[k] + 2 mu e[k] u[k]
        self.w = self.w + (two_mu * e_k) * u

        self._record_history()

    runtime_s = float(perf_counter() - start)
    if verbose:
        print(f"[VolterraLMS] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "last_regressor": None if u_last is None else u_last.copy(),
            "memory": int(self.memory),
            "n_coeffs": int(self.n_coeffs),
        }

    return self._pack_results(
        outputs=y_out,
        errors=e_out,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the Volterra LMS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of float
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal states in result.extra:
"last_regressor", "memory", and "n_coeffs".
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar a priori output sequence, y[k] = w^T[k] u[k].
- errors : ndarray of float, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of float
Volterra coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True.
class VolterraRLS(AdaptiveFilter):
    """
    Second-order Volterra RLS adaptive filter (real-valued).

    RLS adaptation (Diniz, Alg. 11.2) applied to a second-order Volterra
    expansion: the linear tapped-delay regressor is augmented with every
    unique quadratic product (squares included), and the expanded coefficient
    vector is estimated with a standard RLS recursion.

    Parameters
    ----------
    memory : int, optional
        Linear memory length ``L``; the delay line is
        ``[x[k], x[k-1], ..., x[k-L+1]]``. Default is 3.
    forgetting_factor : float, optional
        Forgetting factor ``lambda`` with ``0 < lambda <= 1``. Default 0.98.
    delta : float, optional
        Positive regularization used to initialize the inverse correlation
        matrix as ``P(0) = I/delta``. Default is 1.0.
    w_init : array_like of float, optional
        Initial coefficient vector with shape ``(n_coeffs,)``; zeros if None.
    safe_eps : float, optional
        Small positive guard for denominators. Default is 1e-12.

    Notes
    -----
    Real-valued only (``supports_complex=False``), enforced by
    ``@ensure_real_signals`` on :meth:`optimize`.

    The regressor stacks the linear delay line with the unique quadratic
    products ``x_lin[i] x_lin[j]`` for ``i <= j``, so

    .. math::
        n_{coeffs} = L + \\frac{L(L+1)}{2}.

    A priori RLS recursion:

    .. math::
        y[k] = w^T[k-1] u[k], \\qquad e[k] = d[k] - y[k],

    .. math::
        g[k] = \\frac{P[k-1] u[k]}{\\lambda + u^T[k] P[k-1] u[k]},

    .. math::
        P[k] = \\frac{1}{\\lambda}\\left(P[k-1] - g[k] u^T[k] P[k-1]\\right),
        \\qquad w[k] = w[k-1] + g[k] e[k].

    The a posteriori output/error (``w[k]`` applied to ``u[k]``) are also
    computed each step and returned in ``extra`` on request. The gain
    denominator is guarded by ``safe_eps`` when it is very small.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 11.2.
    """

    supports_complex: bool = False

    def __init__(
        self,
        memory: int = 3,
        forgetting_factor: float = 0.98,
        delta: float = 1.0,
        w_init: Optional[ArrayLike] = None,
        *,
        safe_eps: float = 1e-12,
    ) -> None:
        """Validate hyper-parameters and initialize ``w``, ``P`` and history.

        Parameters
        ----------
        memory : int, optional
            Linear memory length ``L``; ``n_coeffs = L + L(L+1)/2``.
        forgetting_factor : float, optional
            Forgetting factor (``0 < lambda <= 1``), typically close to 1.
        delta : float, optional
            Positive regularization: ``P[0] = I / delta``.
        w_init : array_like of float, optional
            Initial coefficient vector (length ``n_coeffs``); zeros if None.
        safe_eps : float, optional
            Small epsilon used to guard denominators.
        """
        memory = int(memory)
        if memory <= 0:
            raise ValueError(f"memory must be > 0. Got {memory}.")

        ff = float(forgetting_factor)
        if not (0.0 < ff <= 1.0):
            raise ValueError(f"forgetting_factor must satisfy 0 < λ <= 1. Got λ={ff}.")

        delta = float(delta)
        if delta <= 0.0:
            raise ValueError(f"delta must be > 0. Got delta={delta}.")

        self.memory: int = memory
        self.lam: float = ff
        self._safe_eps: float = float(safe_eps)
        self.n_coeffs: int = memory + (memory * (memory + 1)) // 2

        super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)

        self.w = np.asarray(self.w, dtype=np.float64)

        if w_init is not None:
            initial = np.asarray(w_init, dtype=np.float64).reshape(-1)
            if initial.size != self.n_coeffs:
                raise ValueError(
                    f"w_init must have length {self.n_coeffs}, got {initial.size}."
                )
            self.w = initial.copy()

        # Inverse input-correlation estimate, P(0) = I / delta.
        self.P: np.ndarray = np.eye(self.n_coeffs, dtype=np.float64) / delta

        self.w_history = []
        self._record_history()

    def _create_volterra_regressor(self, x_lin: np.ndarray) -> np.ndarray:
        """Stack the linear delay line with its unique quadratic products.

        Parameters
        ----------
        x_lin : ndarray of float
            Linear delay line with shape ``(L,)`` ordered as
            ``[x[k], x[k-1], ..., x[k-L+1]]``.

        Returns
        -------
        ndarray of float
            Regressor ``u[k]`` with shape ``(n_coeffs,)``: linear terms first,
            then quadratic terms for ``i <= j``.
        """
        x_lin = np.asarray(x_lin, dtype=np.float64).reshape(-1)
        if x_lin.size != self.memory:
            raise ValueError(f"x_lin must have length {self.memory}, got {x_lin.size}.")

        # Row-major upper-triangle pairs reproduce the (i increasing,
        # j from i to L-1) term ordering.
        rows, cols = np.triu_indices(self.memory)
        quad = x_lin[rows] * x_lin[cols]

        return np.concatenate([x_lin, quad], axis=0)

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """Run the Volterra RLS recursion over paired input/desired sequences.

        Parameters
        ----------
        input_signal : array_like of float
            Input sequence ``x[k]`` with shape ``(N,)`` (flattened).
        desired_signal : array_like of float
            Desired sequence ``d[k]`` with shape ``(N,)`` (flattened).
        verbose : bool, optional
            When True, prints the total runtime after completion.
        return_internal_states : bool, optional
            When True, ``result.extra`` carries the a posteriori sequences,
            the last gain/denominator/regressor, and the configuration
            scalars (``memory``, ``n_coeffs``, ``forgetting_factor``).

        Returns
        -------
        OptimizationResult
            ``outputs``/``errors`` are the a priori sequences
            (``y[k] = w^T[k-1] u[k]``, ``e[k] = d[k] - y[k]``), coefficients
            come from the base-class history, ``error_type`` is
            ``"a_priori"``; ``extra`` is populated only on request.
        """
        start = perf_counter()

        x_seq = np.asarray(input_signal, dtype=np.float64).ravel()
        d_seq = np.asarray(desired_signal, dtype=np.float64).ravel()

        n = int(x_seq.size)

        y_prio = np.zeros(n, dtype=np.float64)
        e_prio = np.zeros(n, dtype=np.float64)
        y_posteriori = np.zeros(n, dtype=np.float64)
        e_posteriori = np.zeros(n, dtype=np.float64)

        taps = int(self.memory)
        # Zero pre-padding supplies x[k]=0 history for the first samples.
        padded = np.zeros(n + (taps - 1), dtype=np.float64)
        padded[taps - 1:] = x_seq

        gain_last: Optional[np.ndarray] = None
        den_last: Optional[float] = None
        u_last: Optional[np.ndarray] = None

        for k in range(n):
            u = self._create_volterra_regressor(padded[k: k + taps][::-1])
            u_last = u

            # A priori output/error computed with w[k-1].
            y_k = float(np.dot(self.w, u))
            e_k = float(d_seq[k] - y_k)
            y_prio[k] = y_k
            e_prio[k] = e_k

            P_u = self.P @ u
            den = float(self.lam + np.dot(u, P_u))
            # Guard a (near-)zero denominator while keeping its sign.
            if abs(den) < self._safe_eps:
                if den == 0.0:
                    den = float(self._safe_eps)
                else:
                    den = float(den + np.sign(den) * self._safe_eps)

            gain = P_u / den
            gain_last = gain
            den_last = den

            self.w = self.w + gain * e_k
            self.P = (self.P - np.outer(gain, P_u)) / self.lam

            # A posteriori quantities with the freshly updated w[k].
            y_p = float(np.dot(self.w, u))
            e_p = float(d_seq[k] - y_p)
            y_posteriori[k] = y_p
            e_posteriori[k] = e_p

            self._record_history()

        runtime_s = float(perf_counter() - start)
        if verbose:
            print(f"[VolterraRLS] Completed in {runtime_s * 1000:.03f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "posteriori_outputs": y_posteriori,
                "posteriori_errors": e_posteriori,
                "last_gain": None if gain_last is None else gain_last.copy(),
                "last_den": den_last,
                "last_regressor": None if u_last is None else u_last.copy(),
                "memory": int(self.memory),
                "n_coeffs": int(self.n_coeffs),
                "forgetting_factor": float(self.lam),
            }

        return self._pack_results(
            outputs=y_prio,
            errors=e_prio,
            runtime_s=runtime_s,
            error_type="a_priori",
            extra=extra,
        )
Second-order Volterra RLS adaptive filter (real-valued).
Volterra RLS (Diniz, Alg. 11.2) using a second-order Volterra expansion and an RLS update applied to the expanded regressor. The model augments a linear tapped-delay regressor with all unique quadratic products (including squares) and estimates the corresponding coefficient vector via RLS.
Parameters
memory : int, optional
Linear memory length L. The linear delay line is
[x[k], x[k-1], ..., x[k-L+1]]. Default is 3.
forgetting_factor : float, optional
Forgetting factor lambda with 0 < lambda <= 1. Default is 0.98.
delta : float, optional
Regularization parameter used to initialize the inverse correlation
matrix as P(0) = I/delta (requires delta > 0). Default is 1.0.
w_init : array_like of float, optional
Initial coefficient vector w(0) with shape (n_coeffs,). If None,
initializes with zeros.
safe_eps : float, optional
Small positive constant used to guard denominators. Default is 1e-12.
Notes
Real-valued only
This implementation is restricted to real-valued signals and coefficients
(supports_complex=False). The constraint is enforced via
@ensure_real_signals on optimize().
Volterra regressor (as implemented) Let the linear delay line be
$$x_{lin}[k] = [x[k], x[k-1], \ldots, x[k-L+1]]^T \in \mathbb{R}^{L}.$$
The second-order Volterra regressor is constructed as
$$u[k] =
\begin{bmatrix} x_{lin}[k] \\ \mathrm{vec}\bigl(x_{lin}[k] x_{lin}^T[k]\bigr)_{i \le j} \end{bmatrix} \in \mathbb{R}^{n_{coeffs}},$$
where the quadratic block contains all products ``x_{lin,i}[k] x_{lin,j}[k]``
for ``0 \le i \le j \le L-1``.
The number of coefficients is
$$n_{coeffs} = L + \frac{L(L+1)}{2}.$$
RLS recursion (a priori form) With
$$y[k] = w^T[k-1] u[k], \qquad e[k] = d[k] - y[k],$$
define the gain
$$g[k] = \frac{P[k-1] u[k]}{\lambda + u^T[k] P[k-1] u[k]},$$
the inverse correlation update
$$P[k] = \frac{1}{\lambda}\left(P[k-1] - g[k] u^T[k] P[k-1]\right),$$
and the coefficient update
$$w[k] = w[k-1] + g[k] e[k].$$
A posteriori quantities
If requested, this implementation also computes the a posteriori
output/error after updating the coefficients at time k:
$$y^{post}[k] = w^T[k] u[k], \qquad e^{post}[k] = d[k] - y^{post}[k].$$
Implementation details
- The denominator lambda + u^T P u is guarded by safe_eps to avoid
numerical issues when very small.
- Coefficient history is recorded via the base class.
- The quadratic-term ordering matches _create_volterra_regressor().
References
def __init__(
    self,
    memory: int = 3,
    forgetting_factor: float = 0.98,
    delta: float = 1.0,
    w_init: Optional[ArrayLike] = None,
    *,
    safe_eps: float = 1e-12,
) -> None:
    """Validate hyper-parameters and initialize coefficients, P and history.

    Parameters
    ----------
    memory : int, optional
        Linear memory length ``L``; ``n_coeffs = L + L(L+1)/2``.
    forgetting_factor : float, optional
        Forgetting factor (``0 < lambda <= 1``), typically close to 1.
    delta : float, optional
        Positive regularization: ``P[0] = I / delta``.
    w_init : array_like of float, optional
        Initial coefficient vector (length ``n_coeffs``); zeros when None.
    safe_eps : float, optional
        Small epsilon used to guard denominators.
    """
    memory = int(memory)
    if memory <= 0:
        raise ValueError(f"memory must be > 0. Got {memory}.")

    ff = float(forgetting_factor)
    if not (0.0 < ff <= 1.0):
        raise ValueError(f"forgetting_factor must satisfy 0 < λ <= 1. Got λ={ff}.")

    delta = float(delta)
    if delta <= 0.0:
        raise ValueError(f"delta must be > 0. Got delta={delta}.")

    self.memory: int = memory
    self.lam: float = ff
    self._safe_eps: float = float(safe_eps)
    self.n_coeffs: int = memory + (memory * (memory + 1)) // 2

    super().__init__(filter_order=self.n_coeffs - 1, w_init=w_init)

    self.w = np.asarray(self.w, dtype=np.float64)

    if w_init is not None:
        initial = np.asarray(w_init, dtype=np.float64).reshape(-1)
        if initial.size != self.n_coeffs:
            raise ValueError(
                f"w_init must have length {self.n_coeffs}, got {initial.size}."
            )
        self.w = initial.copy()

    # Inverse input-correlation estimate, P(0) = I / delta.
    self.P: np.ndarray = np.eye(self.n_coeffs, dtype=np.float64) / delta

    # Reset and seed the coefficient history with the initial weights.
    self.w_history = []
    self._record_history()
Parameters
memory: Linear memory length L. Determines number of Volterra coefficients: n_coeffs = L + L(L+1)/2. forgetting_factor: Forgetting factor λ (typically close to 1). Must satisfy 0 < λ <= 1. delta: Positive regularization for initializing the inverse correlation matrix: P[0] = I / delta. w_init: Optional initial coefficient vector (length n_coeffs). If None, zeros. safe_eps: Small epsilon to guard denominators.
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """Run the Volterra RLS recursion over paired input/desired sequences.

    Parameters
    ----------
    input_signal : array_like of float
        Input sequence ``x[k]`` with shape ``(N,)`` (flattened internally).
    desired_signal : array_like of float
        Desired sequence ``d[k]`` with shape ``(N,)`` (flattened internally).
    verbose : bool, optional
        When True, prints the total runtime after completion.
    return_internal_states : bool, optional
        When True, ``result.extra`` carries ``posteriori_outputs``,
        ``posteriori_errors``, ``last_gain``, ``last_den``,
        ``last_regressor``, ``memory``, ``n_coeffs`` and
        ``forgetting_factor``.

    Returns
    -------
    OptimizationResult
        ``outputs``/``errors`` are the a priori sequences
        (``y[k] = w^T[k-1] u[k]``, ``e[k] = d[k] - y[k]``), coefficients
        come from the base-class history, ``error_type`` is ``"a_priori"``;
        ``extra`` is populated only on request.
    """
    start = perf_counter()

    x_seq = np.asarray(input_signal, dtype=np.float64).ravel()
    d_seq = np.asarray(desired_signal, dtype=np.float64).ravel()

    n = int(x_seq.size)

    y_prio = np.zeros(n, dtype=np.float64)
    e_prio = np.zeros(n, dtype=np.float64)
    y_posteriori = np.zeros(n, dtype=np.float64)
    e_posteriori = np.zeros(n, dtype=np.float64)

    taps = int(self.memory)
    # Zero pre-padding supplies x[k]=0 history for the first samples.
    padded = np.zeros(n + (taps - 1), dtype=np.float64)
    padded[taps - 1:] = x_seq

    gain_last: Optional[np.ndarray] = None
    den_last: Optional[float] = None
    u_last: Optional[np.ndarray] = None

    for k in range(n):
        u = self._create_volterra_regressor(padded[k: k + taps][::-1])
        u_last = u

        # A priori output/error computed with w[k-1].
        y_k = float(np.dot(self.w, u))
        e_k = float(d_seq[k] - y_k)
        y_prio[k] = y_k
        e_prio[k] = e_k

        P_u = self.P @ u
        den = float(self.lam + np.dot(u, P_u))
        # Guard a (near-)zero denominator while keeping its sign.
        if abs(den) < self._safe_eps:
            if den == 0.0:
                den = float(self._safe_eps)
            else:
                den = float(den + np.sign(den) * self._safe_eps)

        gain = P_u / den
        gain_last = gain
        den_last = den

        self.w = self.w + gain * e_k
        self.P = (self.P - np.outer(gain, P_u)) / self.lam

        # A posteriori quantities with the freshly updated w[k].
        y_p = float(np.dot(self.w, u))
        e_p = float(d_seq[k] - y_p)
        y_posteriori[k] = y_p
        e_posteriori[k] = e_p

        self._record_history()

    runtime_s = float(perf_counter() - start)
    if verbose:
        print(f"[VolterraRLS] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "posteriori_outputs": y_posteriori,
            "posteriori_errors": e_posteriori,
            "last_gain": None if gain_last is None else gain_last.copy(),
            "last_den": den_last,
            "last_regressor": None if u_last is None else u_last.copy(),
            "memory": int(self.memory),
            "n_coeffs": int(self.n_coeffs),
            "forgetting_factor": float(self.lam),
        }

    return self._pack_results(
        outputs=y_prio,
        errors=e_prio,
        runtime_s=runtime_s,
        error_type="a_priori",
        extra=extra,
    )
Executes the Volterra RLS adaptation loop over paired input/desired sequences.
Parameters
input_signal : array_like of float
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : array_like of float
Desired sequence d[k] with shape (N,) (will be flattened).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes additional internal sequences in result.extra,
including a posteriori output/error and last gain/denominator.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of float, shape (N,)
Scalar a priori output sequence, y[k] = w^T[k-1] u[k].
- errors : ndarray of float, shape (N,)
Scalar a priori error sequence, e[k] = d[k] - y[k].
- coefficients : ndarray of float
Volterra coefficient history recorded by the base class.
- error_type : str
Set to "a_priori".
- extra : dict, optional
Present only if return_internal_states=True with:
- posteriori_outputs : ndarray of float
A posteriori output sequence y^{post}[k].
- posteriori_errors : ndarray of float
A posteriori error sequence e^{post}[k].
- last_gain : ndarray of float
Last RLS gain vector g[k].
- last_den : float
Last gain denominator lambda + u^T P u.
- last_regressor : ndarray of float
Last Volterra regressor u[k].
- memory : int
Linear memory length L.
- n_coeffs : int
Number of Volterra coefficients.
- forgetting_factor : float
The forgetting factor lambda used.
class CFDLMS(AdaptiveFilter):
    """
    Constrained Frequency-Domain LMS (CFDLMS) for real-valued signals (block adaptive).

    Implements the Constrained Frequency-Domain LMS algorithm (Algorithm 12.4, Diniz)
    for identifying/estimating a real-valued FIR system in a block-wise
    frequency-domain framework with a time-domain constraint (to control circular
    convolution / enforce effective FIR support).

    Block structure and main variables
    ----------------------------------
    Let:
    - M: number of subbands / FFT size (also the block length in frequency domain),
    - L: decimation / number of fresh time samples per iteration (block advance),
    - Nw: time-support (per subband) of the adaptive filters, so each subband
      filter has length (Nw+1) in the *time-lag* axis (columns of ``ww``).

    Internal coefficient representation
    -----------------------------------
    The adaptive parameters are stored as a complex matrix ``ww`` in
    C^{M x (Nw+1)}, where each row corresponds to one frequency bin (subband),
    and each column is a delay-tap in the *block* (overlap) dimension.

    For compatibility with the base API:
    - ``self.w`` stores a flattened real view of ``ww`` (real part only),
    - ``OptimizationResult.coefficients`` comes from the base ``w_history``,
    - the full matrix trajectory is returned in ``result.extra["ww_history"]``.

    Signal processing conventions (as implemented)
    ----------------------------------------------
    Per iteration k (block index):
    - Build an M-length time vector from the most recent input segment
      (reversed): ``x_p = [x[kL+M-1], ..., x[kL]]^T`` then compute a *unitary*
      FFT: ``ui = FFT(x_p) / sqrt(M)``.
    - Maintain a regressor matrix ``uu`` with shape (M, Nw+1) containing the
      most recent Nw+1 frequency-domain regressors (columns shift right each
      iteration).
    - Compute the frequency-domain output per bin ``uy = sum_j uu[:, j] * ww[:, j]``
      and return to time domain: ``y_block = IFFT(uy) * sqrt(M)``. Only the
      first L samples are used as the "valid" output of this block.

    Error, energy smoothing, and update
    -----------------------------------
    The algorithm forms an L-length error (in the reversed time order used
    internally), zero-pads it to length M, and FFTs it (unitary) to obtain
    ``et``. A smoothed energy estimate per bin is kept:
    ``sig[k] = (1-a) sig[k-1] + a |ui|^2`` where ``a = smoothing``.
    The normalized per-bin step is ``gain = step / (gamma + (Nw+1) * sig)`` and
    the preliminary frequency-domain correction is
    ``wwc = gain[:,None] * conj(uu) * et[:,None]``.

    Constrained / time-domain projection
    ------------------------------------
    The "constraint" is applied by transforming wwc along axis=0 (FFT across
    bins), zeroing time indices >= L (i.e., enforcing an L-sample time
    support), and transforming back (IFFT). This is the standard "constrained"
    step that reduces circular-convolution artifacts.

    Returned sequences
    ------------------
    - ``outputs``: real-valued estimated output, length = n_iters * L
    - ``errors``: real-valued output error (d - y), same length as outputs
    - ``error_type="output_error"`` (block output error, not a priori scalar error)

    Parameters
    ----------
    filter_order : int, default=5
        Subband filter order Nw (number of taps is Nw+1 along the overlap dimension).
    n_subbands : int, default=64
        FFT size M (number of subbands / frequency bins).
    decimation : int, optional
        Block advance L (samples per iteration). If None, defaults to M//2.
    step_size : float, default=0.1
        Global step size (mu).
    gamma : float, default=1e-2
        Regularization constant in the normalization denominator (>0).
    smoothing : float, default=0.01
        Exponential smoothing factor a in (0,1].
    w_init : array_like, optional
        Initial coefficients. Can be either:
        - matrix shape (M, Nw+1), or
        - flat length M*(Nw+1), reshaped internally.

    Notes
    -----
    - Real-valued interface: input_signal and desired_signal are enforced real.
      Internally complex arithmetic is used due to FFT processing.
    - This is a block algorithm: one iteration produces L output samples.
    """
    supports_complex: bool = False

    M: int
    L: int
    Nw: int
    step_size: float
    gamma: float
    smoothing: float

    def __init__(
        self,
        filter_order: int = 5,
        n_subbands: int = 64,
        decimation: Optional[int] = None,
        step_size: float = 0.1,
        gamma: float = 1e-2,
        smoothing: float = 0.01,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        if n_subbands <= 0:
            raise ValueError("n_subbands (M) must be a positive integer.")
        if filter_order < 0:
            raise ValueError("filter_order (Nw) must be >= 0.")
        if decimation is None:
            decimation = n_subbands // 2
        if decimation <= 0 or decimation > n_subbands:
            raise ValueError("decimation (L) must satisfy 1 <= L <= M.")
        if gamma <= 0:
            raise ValueError("gamma must be > 0.")
        if not (0.0 < smoothing <= 1.0):
            raise ValueError("smoothing must be in (0, 1].")

        self.M = int(n_subbands)
        self.L = int(decimation)
        self.Nw = int(filter_order)

        self.step_size = float(step_size)
        self.gamma = float(gamma)
        self.smoothing = float(smoothing)

        # Base class sees the flattened parameter vector of size M*(Nw+1).
        n_params = self.M * (self.Nw + 1)
        super().__init__(filter_order=n_params - 1, w_init=None)

        self.ww: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
        if w_init is not None:
            self.ww = self._coerce_coeff_matrix(w_init, "w_init")

        # Frequency-domain regressor history and smoothed per-bin energy.
        self.uu: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
        self.sig: np.ndarray = np.zeros(self.M, dtype=np.float64)

        # FIX: take the real part explicitly before the float cast. Calling
        # astype(float) directly on a complex array raises numpy's
        # ComplexWarning and silently discards the imaginary part; optimize()
        # already uses np.real(...) consistently.
        self.w = np.real(self.ww).reshape(-1).astype(float, copy=False)
        self.w_history = []
        self._record_history()

        self.ww_history: list[np.ndarray] = []

    def _coerce_coeff_matrix(self, w_raw: ArrayLike, arg_name: str) -> np.ndarray:
        """
        Validate and reshape user-supplied coefficients into a complex
        (M, Nw+1) matrix. Shared by __init__ (w_init) and reset_filter (w_new).
        """
        n_params = self.M * (self.Nw + 1)
        w0 = np.asarray(w_raw)
        if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
            return w0.astype(np.complex128, copy=True)
        w0 = w0.reshape(-1)
        if w0.size != n_params:
            raise ValueError(
                f"{arg_name} has incompatible size. Expected {n_params} "
                f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
            )
        return w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True)

    def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None:
        """
        Reset coefficients/history.

        If w_new is:
        - None: zeros
        - shape (M, Nw+1): used directly
        - flat of length M*(Nw+1): reshaped
        """
        if w_new is None:
            self.ww = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
        else:
            self.ww = self._coerce_coeff_matrix(w_new, "w_new")

        self.uu = np.zeros((self.M, self.Nw + 1), dtype=np.complex128)
        self.sig = np.zeros(self.M, dtype=np.float64)

        self.ww_history = []
        # Same fix as __init__: explicit real part before the float cast.
        self.w = np.real(self.ww).reshape(-1).astype(float, copy=False)
        self.w_history = []
        self._record_history()

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run CFDLMS adaptation over real-valued (x[n], d[n]) in blocks.

        Parameters
        ----------
        input_signal : array_like of float
            Input sequence x[n], shape (N,).
        desired_signal : array_like of float
            Desired sequence d[n], shape (N,).
        verbose : bool, default=False
            If True, prints runtime and basic iteration stats.
        return_internal_states : bool, default=False
            If True, includes additional internal trajectories in result.extra.

        Returns
        -------
        OptimizationResult
            outputs : ndarray of float, shape (n_iters * L,)
                Concatenated block outputs (L per iteration).
            errors : ndarray of float, shape (n_iters * L,)
                Output error sequence e[n] = d[n] - y[n].
            coefficients : ndarray
                Flattened coefficient history (from base class; real part of ww).
            error_type : str
                "output_error".
            extra : dict
                Always contains:
                - "ww_history": list of ndarray, each shape (M, Nw+1)
                - "n_iters": int
                If return_internal_states=True, also contains:
                - "sig": ndarray, shape (M,) final smoothed per-bin energy
                - "sig_history": ndarray, shape (n_iters, M)
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        M = self.M
        L = self.L
        Nw = self.Nw

        # Block k reads xpad[kL : kL+M]; with the L-zero prefix this requires
        # kL + M <= x.size + L, hence the floor expression below.
        max_iters_from_x = int(np.floor((x.size + L - M) / L) + 1) if (x.size + L) >= M else 0
        max_iters_from_d = int(d.size // L)
        n_iters = max(0, min(max_iters_from_x, max_iters_from_d))

        out_len = n_iters * L
        outputs = np.zeros(out_len, dtype=np.float64)
        errors = np.zeros(out_len, dtype=np.float64)

        # Zero prefix so the first block starts from a cleared delay line.
        xpad = np.concatenate([np.zeros(L, dtype=np.float64), x])

        self.ww_history = []

        sig_hist: Optional[np.ndarray] = np.zeros((n_iters, M), dtype=np.float64) if return_internal_states else None

        # Local bindings; uu/sig are mutated in place, ww is rebound each block.
        uu = self.uu
        ww = self.ww
        sig = self.sig

        a = self.smoothing
        u_step = self.step_size
        gamma = self.gamma
        sqrtM = np.sqrt(M)

        for k in range(n_iters):
            start = k * L
            seg_x = xpad[start : start + M]

            # Reversed segment: most recent sample first.
            x_p = seg_x[::-1].astype(np.complex128, copy=False)

            d_seg = d[start : start + L]
            d_p = d_seg[::-1].astype(np.complex128, copy=False)

            # Unitary FFT of the input block.
            ui = np.fft.fft(x_p) / sqrtM

            # Shift the regressor history right and insert the new regressor.
            uu[:, 1:] = uu[:, :-1]
            uu[:, 0] = ui

            # Per-bin output and return to time domain (unitary inverse).
            uy = np.sum(uu * ww, axis=1)

            y_block = np.fft.ifft(uy) * sqrtM
            y_firstL = y_block[:L]

            # Error in the internal (reversed) order, used for the update.
            e_rev = d_p - y_firstL

            # Natural-order real output/error reported to the caller.
            y_time = np.real(y_firstL[::-1])
            e_time = d_seg - y_time

            outputs[start : start + L] = y_time
            errors[start : start + L] = e_time

            # Zero-pad error to M and go back to the frequency domain.
            e_pad = np.concatenate([e_rev, np.zeros(M - L, dtype=np.complex128)])
            et = np.fft.fft(e_pad) / sqrtM
            # Exponentially smoothed per-bin input energy (in place).
            sig[:] = (1.0 - a) * sig + a * (np.abs(ui) ** 2)

            denom = gamma + (Nw + 1) * sig
            gain = u_step / denom

            # Unconstrained frequency-domain correction.
            wwc = (gain[:, None] * np.conj(uu) * et[:, None]).astype(np.complex128, copy=False)

            # Constraint: enforce an L-sample time support across bins.
            waux = np.fft.fft(wwc, axis=0) / sqrtM
            waux[L:, :] = 0.0
            wwc_c = np.fft.ifft(waux, axis=0) * sqrtM

            ww = ww + wwc_c

            self.ww_history.append(ww.copy())

            self.w = np.real(ww.reshape(-1)).astype(float, copy=False)
            self._record_history()

            if return_internal_states and sig_hist is not None:
                sig_hist[k, :] = sig

        # Persist final state back onto the instance.
        self.uu = uu
        self.ww = ww
        self.sig = sig

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[CFDLMS] Completed in {runtime_s * 1000:.03f} ms | iters={n_iters} | out_len={out_len}")

        extra: Dict[str, Any] = {
            "ww_history": self.ww_history,
            "n_iters": int(n_iters),
        }
        if return_internal_states:
            extra["sig"] = sig.copy()
            extra["sig_history"] = sig_hist

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Constrained Frequency-Domain LMS (CFDLMS) for real-valued signals (block adaptive).
Implements the Constrained Frequency-Domain LMS algorithm (Algorithm 12.4, Diniz) for identifying/estimating a real-valued FIR system in a block-wise frequency-domain framework with a time-domain constraint (to control circular convolution / enforce effective FIR support).
Block structure and main variables
Let:
- M: number of subbands / FFT size (also the block length in frequency domain),
- L: decimation / number of fresh time samples per iteration (block advance),
- Nw: time-support (per subband) of the adaptive filters, so each subband filter
has length (Nw+1) in the time-lag axis (columns of ww).
Internal coefficient representation
The adaptive parameters are stored as a complex matrix:
ww in C^{M x (Nw+1)}
where each row corresponds to one frequency bin (subband), and each column is a delay-tap in the block (overlap) dimension.
For compatibility with the base API:
- self.w stores a flattened real view of ww (real part only),
- OptimizationResult.coefficients comes from the base w_history (flattened),
- the full matrix trajectory is returned in result.extra["ww_history"].
Signal processing conventions (as implemented)
Per iteration k (block index):
- Build an M-length time vector from the most recent input segment (reversed): x_p = [x[kL+M-1], ..., x[kL]]^T then compute a *unitary* FFT: ui = FFT(x_p) / sqrt(M)
- Maintain a regressor matrix `uu` with shape (M, Nw+1) containing the most recent Nw+1 frequency-domain regressors (columns shift right each iteration).
- Compute the frequency-domain output per bin, uy = sum_j uu[:, j] * ww[:, j], and return to the time domain via y_block = IFFT(uy) * sqrt(M).
Only the first L samples are used as the “valid” output of this block.
Error, energy smoothing, and update
The algorithm forms an L-length error (in the reversed time order used internally),
zero-pads it to length M, and FFTs it (unitary) to obtain et.
A smoothed energy estimate per bin is kept:
sig[k] = (1-a) sig[k-1] + a |ui|^2
where a = smoothing.
The normalized per-bin step is: gain = step / (gamma + (Nw+1) * sig)
A preliminary frequency-domain correction is built: wwc = gain[:,None] * conj(uu) * et[:,None]
Constrained / time-domain projection
The “constraint” is applied by transforming wwc along axis=0 (FFT across bins), zeroing time indices >= L (i.e., enforcing an L-sample time support), and transforming back (IFFT). This is the standard “constrained” step that reduces circular-convolution artifacts.
Returned sequences
- `outputs`: real-valued estimated output, length = n_iters * L
- `errors`: real-valued output error (d - y), same length as outputs
- `error_type="output_error"` (block output error, not a priori scalar error)
Parameters
filter_order : int, default=5 Subband filter order Nw (number of taps is Nw+1 along the overlap dimension). n_subbands : int, default=64 FFT size M (number of subbands / frequency bins). decimation : int, optional Block advance L (samples per iteration). If None, defaults to M//2. step_size : float, default=0.1 Global step size (mu). gamma : float, default=1e-2 Regularization constant in the normalization denominator (>0). smoothing : float, default=0.01 Exponential smoothing factor a in (0,1]. w_init : array_like, optional Initial coefficients. Can be either: - matrix shape (M, Nw+1), or - flat length M*(Nw+1), reshaped internally.
Notes
- Real-valued interface: input_signal and desired_signal are enforced real. Internally complex arithmetic is used due to FFT processing.
- This is a block algorithm: one iteration produces L output samples.
138 def __init__( 139 self, 140 filter_order: int = 5, 141 n_subbands: int = 64, 142 decimation: Optional[int] = None, 143 step_size: float = 0.1, 144 gamma: float = 1e-2, 145 smoothing: float = 0.01, 146 w_init: Optional[ArrayLike] = None, 147 ) -> None: 148 if n_subbands <= 0: 149 raise ValueError("n_subbands (M) must be a positive integer.") 150 if filter_order < 0: 151 raise ValueError("filter_order (Nw) must be >= 0.") 152 if decimation is None: 153 decimation = n_subbands // 2 154 if decimation <= 0 or decimation > n_subbands: 155 raise ValueError("decimation (L) must satisfy 1 <= L <= M.") 156 if gamma <= 0: 157 raise ValueError("gamma must be > 0.") 158 if not (0.0 < smoothing <= 1.0): 159 raise ValueError("smoothing must be in (0, 1].") 160 161 self.M = int(n_subbands) 162 self.L = int(decimation) 163 self.Nw = int(filter_order) 164 165 self.step_size = float(step_size) 166 self.gamma = float(gamma) 167 self.smoothing = float(smoothing) 168 169 n_params = self.M * (self.Nw + 1) 170 super().__init__(filter_order=n_params - 1, w_init=None) 171 172 self.ww: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128) 173 if w_init is not None: 174 w0 = np.asarray(w_init) 175 if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1): 176 self.ww = w0.astype(np.complex128, copy=True) 177 else: 178 w0 = w0.reshape(-1) 179 if w0.size != n_params: 180 raise ValueError( 181 f"w_init has incompatible size. Expected {n_params} " 182 f"or shape ({self.M},{self.Nw+1}), got {w0.size}." 183 ) 184 self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True) 185 186 self.uu: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=np.complex128) 187 self.sig: np.ndarray = np.zeros(self.M, dtype=np.float64) 188 189 self.w = self.ww.reshape(-1).astype(float, copy=False) 190 self.w_history = [] 191 self._record_history() 192 193 self.ww_history: list[np.ndarray] = []
195 def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None: 196 """ 197 Reset coefficients/history. 198 199 If w_new is: 200 - None: zeros 201 - shape (M, Nw+1): used directly 202 - flat of length M*(Nw+1): reshaped 203 """ 204 n_params = self.M * (self.Nw + 1) 205 206 if w_new is None: 207 self.ww = np.zeros((self.M, self.Nw + 1), dtype=np.complex128) 208 else: 209 w0 = np.asarray(w_new) 210 if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1): 211 self.ww = w0.astype(np.complex128, copy=True) 212 else: 213 w0 = w0.reshape(-1) 214 if w0.size != n_params: 215 raise ValueError( 216 f"w_new has incompatible size. Expected {n_params} " 217 f"or shape ({self.M},{self.Nw+1}), got {w0.size}." 218 ) 219 self.ww = w0.reshape(self.M, self.Nw + 1).astype(np.complex128, copy=True) 220 221 self.uu = np.zeros((self.M, self.Nw + 1), dtype=np.complex128) 222 self.sig = np.zeros(self.M, dtype=np.float64) 223 224 self.ww_history = [] 225 self.w = self.ww.reshape(-1).astype(float, copy=False) 226 self.w_history = [] 227 self._record_history()
Reset coefficients/history.
If w_new is:
- None: zeros
- shape (M, Nw+1): used directly
- flat of length M*(Nw+1): reshaped
    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run CFDLMS adaptation over real-valued (x[n], d[n]) in blocks.

        Parameters
        ----------
        input_signal : array_like of float
            Input sequence x[n], shape (N,).
        desired_signal : array_like of float
            Desired sequence d[n], shape (N,).
        verbose : bool, default=False
            If True, prints runtime and basic iteration stats.
        return_internal_states : bool, default=False
            If True, includes additional internal trajectories in result.extra.

        Returns
        -------
        OptimizationResult
            outputs : ndarray of float, shape (n_iters * L,)
                Concatenated block outputs (L per iteration).
            errors : ndarray of float, shape (n_iters * L,)
                Output error sequence e[n] = d[n] - y[n].
            coefficients : ndarray
                Flattened coefficient history (from base class; real part of ww).
            error_type : str
                "output_error".
            extra : dict
                Always contains:
                - "ww_history": list of ndarray, each shape (M, Nw+1)
                - "n_iters": int
                If return_internal_states=True, also contains:
                - "sig": ndarray, shape (M,) final smoothed per-bin energy
                - "sig_history": ndarray, shape (n_iters, M)
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=np.float64).ravel()
        d = np.asarray(desired_signal, dtype=np.float64).ravel()

        M = self.M
        L = self.L
        Nw = self.Nw

        # Block k reads xpad[kL : kL+M]; with the L-zero prefix this requires
        # kL + M <= x.size + L, hence the floor expression below.
        max_iters_from_x = int(np.floor((x.size + L - M) / L) + 1) if (x.size + L) >= M else 0
        max_iters_from_d = int(d.size // L)
        n_iters = max(0, min(max_iters_from_x, max_iters_from_d))

        out_len = n_iters * L
        outputs = np.zeros(out_len, dtype=np.float64)
        errors = np.zeros(out_len, dtype=np.float64)

        # Zero prefix so the first block starts from a cleared delay line.
        xpad = np.concatenate([np.zeros(L, dtype=np.float64), x])

        self.ww_history = []

        sig_hist: Optional[np.ndarray] = np.zeros((n_iters, M), dtype=np.float64) if return_internal_states else None

        # Local bindings; uu/sig are mutated in place, ww is rebound each block.
        uu = self.uu
        ww = self.ww
        sig = self.sig

        a = self.smoothing
        u_step = self.step_size
        gamma = self.gamma
        sqrtM = np.sqrt(M)

        for k in range(n_iters):
            start = k * L
            seg_x = xpad[start : start + M]

            # Reversed segment: most recent sample first.
            x_p = seg_x[::-1].astype(np.complex128, copy=False)

            d_seg = d[start : start + L]
            d_p = d_seg[::-1].astype(np.complex128, copy=False)

            # Unitary FFT of the input block.
            ui = np.fft.fft(x_p) / sqrtM

            # Shift the regressor history right and insert the new regressor.
            uu[:, 1:] = uu[:, :-1]
            uu[:, 0] = ui

            # Per-bin output and return to time domain (unitary inverse).
            uy = np.sum(uu * ww, axis=1)

            y_block = np.fft.ifft(uy) * sqrtM
            y_firstL = y_block[:L]

            # Error in the internal (reversed) order, used for the update.
            e_rev = d_p - y_firstL

            # Natural-order real output/error reported to the caller.
            y_time = np.real(y_firstL[::-1])
            e_time = d_seg - y_time

            outputs[start : start + L] = y_time
            errors[start : start + L] = e_time

            # Zero-pad error to M and go back to the frequency domain.
            e_pad = np.concatenate([e_rev, np.zeros(M - L, dtype=np.complex128)])
            et = np.fft.fft(e_pad) / sqrtM
            # Exponentially smoothed per-bin input energy (in place).
            sig[:] = (1.0 - a) * sig + a * (np.abs(ui) ** 2)

            denom = gamma + (Nw + 1) * sig
            gain = u_step / denom

            # Unconstrained frequency-domain correction.
            wwc = (gain[:, None] * np.conj(uu) * et[:, None]).astype(np.complex128, copy=False)

            # Constraint: enforce an L-sample time support across bins.
            waux = np.fft.fft(wwc, axis=0) / sqrtM
            waux[L:, :] = 0.0
            wwc_c = np.fft.ifft(waux, axis=0) * sqrtM

            ww = ww + wwc_c

            self.ww_history.append(ww.copy())

            self.w = np.real(ww.reshape(-1)).astype(float, copy=False)
            self._record_history()

            if return_internal_states and sig_hist is not None:
                sig_hist[k, :] = sig

        # Persist final state back onto the instance.
        self.uu = uu
        self.ww = ww
        self.sig = sig

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[CFDLMS] Completed in {runtime_s * 1000:.03f} ms | iters={n_iters} | out_len={out_len}")

        extra: Dict[str, Any] = {
            "ww_history": self.ww_history,
            "n_iters": int(n_iters),
        }
        if return_internal_states:
            extra["sig"] = sig.copy()
            extra["sig_history"] = sig_hist

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Run CFDLMS adaptation over real-valued (x[n], d[n]) in blocks.
Parameters
input_signal : array_like of float Input sequence x[n], shape (N,). desired_signal : array_like of float Desired sequence d[n], shape (N,). verbose : bool, default=False If True, prints runtime and basic iteration stats. return_internal_states : bool, default=False If True, includes additional internal trajectories in result.extra.
Returns
OptimizationResult outputs : ndarray of float, shape (n_iters * L,) Concatenated block outputs (L per iteration). errors : ndarray of float, shape (n_iters * L,) Output error sequence e[n] = d[n] - y[n]. coefficients : ndarray Flattened coefficient history (from base class; real part of ww). error_type : str "output_error". extra : dict Always contains: - "ww_history": list of ndarray, each shape (M, Nw+1) - "n_iters": int If return_internal_states=True, also contains: - "sig": ndarray, shape (M,) final smoothed per-bin energy - "sig_history": ndarray, shape (n_iters, M)
54class DLCLLMS(AdaptiveFilter): 55 """ 56 Delayless Closed-Loop Subband LMS (DLCLLMS) for real-valued fullband signals. 57 58 Implements the Delayless Closed-Loop Subband LMS adaptive filtering algorithm 59 (Algorithm 12.3, Diniz) using: 60 - a DFT analysis bank (complex subband signals), 61 - a polyphase Nyquist / fractional-delay prototype (Ed) to realize the delayless 62 closed-loop structure, 63 - and an equivalent fullband FIR mapping (GG) used to generate the output in the 64 time domain. 65 66 High-level operation (as implemented) 67 ------------------------------------- 68 Processing is block-based with block length: 69 L = M (M = number of subbands / DFT size) 70 71 For each block k: 72 1) Form a reversed block x_p and pass each sample through a per-branch fractional-delay 73 structure (polyphase) driven by `Ed`, producing x_frac (length M). 74 2) Compute subband input: 75 x_sb = F @ x_frac 76 where F is the (non-unitary) DFT matrix (MATLAB dftmtx convention). 77 3) Map current subband coefficients to an equivalent fullband FIR: 78 GG = equivalent_fullband(w_sb) 79 and filter the fullband input block through GG (with state) to produce y_block. 80 4) Compute fullband error e_block = d_block - y_block. 
81 5) Pass the reversed error block through the same fractional-delay structure to get e_frac, 82 then compute subband error: 83 e_sb = F @ e_frac 84 6) Update subband coefficients with an LMS-like recursion using a subband delay line x_cl 85 and a smoothed power estimate sig[m]: 86 sig[m] = (1-a) sig[m] + a |x_sb[m]|^2 87 mu_n = step / (gamma + (Nw+1) * sig[m]) 88 w_sb[m,:] <- w_sb[m,:] + 2 * mu_n * conj(e_sb[m]) * x_cl[m,:] 89 90 Coefficient representation and mapping 91 -------------------------------------- 92 - Subband coefficients are stored in: 93 w_sb : complex ndarray, shape (M, Nw+1) 94 95 - For output synthesis and for the base API, an equivalent fullband FIR is built: 96 GG : real ndarray, length (M*Nw) 97 98 The mapping matches the provided MATLAB logic: 99 * Compute ww = real(F^H w_sb) / M 100 * For branch m=0: take ww[0, :Nw] 101 * For m>=1: convolve ww[m,:] with Ed[m-1,:] and extract a length-Nw segment 102 starting at (Dint+1), where Dint=(P-1)//2 and P is the polyphase length. 103 104 - The base-class coefficient vector `self.w` stores GG (float), and 105 `OptimizationResult.coefficients` contains the history of GG recorded **once per block** 106 (plus the initial entry). 107 108 Parameters 109 ---------- 110 filter_order : int, default=5 111 Subband filter order Nw (number of taps per subband delay line is Nw+1). 112 n_subbands : int, default=4 113 Number of subbands M (DFT size). Also equals the processing block length L. 114 step_size : float, default=0.1 115 Global LMS step size. 116 gamma : float, default=1e-2 117 Regularization constant in the normalized step denominator (>0 recommended). 118 a : float, default=1e-2 119 Exponential smoothing factor for subband power sig in (0,1]. 120 nyquist_len : int, default=2 121 Length Nfd of the Nyquist (fractional-delay) prototype used to build Ed. 122 w_init : array_like, optional 123 Initial subband coefficient matrix. 
Can be either: 124 - shape (M, Nw+1), or 125 - flat length M*(Nw+1), reshaped internally. 126 127 Notes 128 ----- 129 - Real-valued interface (input_signal and desired_signal enforced real). Internal 130 computations use complex subband signals. 131 - This implementation processes only `n_used = floor(N/M)*M` samples. Any tail 132 samples (N - n_used) are left with output=0 and error=d in that region. 133 - The reported `error_type` is "output_error" (fullband output error sequence). 134 """ 135 supports_complex: bool = False 136 137 def __init__( 138 self, 139 filter_order: int = 5, 140 n_subbands: int = 4, 141 step_size: float = 0.1, 142 gamma: float = 1e-2, 143 a: float = 1e-2, 144 nyquist_len: int = 2, 145 w_init: Optional[ArrayLike] = None, 146 ) -> None: 147 self.M: int = int(n_subbands) 148 if self.M <= 0: 149 raise ValueError("n_subbands must be a positive integer.") 150 151 self.Nw: int = int(filter_order) 152 if self.Nw <= 0: 153 raise ValueError("filter_order must be a positive integer.") 154 155 self.step_size: float = float(step_size) 156 self.gamma: float = float(gamma) 157 self.a: float = float(a) 158 159 self.nyquist_len: int = int(nyquist_len) 160 if self.nyquist_len <= 0: 161 raise ValueError("nyquist_len must be a positive integer.") 162 163 self._full_len: int = int(self.M * self.Nw) 164 165 super().__init__(filter_order=self._full_len - 1, w_init=None) 166 167 self.Ed: np.ndarray = _design_polyphase_nyquist_bank(self.M, self.nyquist_len) 168 self._P: int = int(self.Ed.shape[1]) 169 self._Dint: int = int((self._P - 1) // 2) 170 171 self.F: np.ndarray = dft_matrix(self.M) 172 173 self.w_sb: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex) 174 if w_init is not None: 175 w0 = np.asarray(w_init) 176 if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1): 177 self.w_sb = w0.astype(complex, copy=True) 178 else: 179 w0 = w0.reshape(-1) 180 if w0.size != self.M * (self.Nw + 1): 181 raise ValueError( 182 f"w_init has incompatible size. 
Expected {self.M*(self.Nw+1)} " 183 f"or shape ({self.M},{self.Nw+1}), got {w0.size}." 184 ) 185 self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True) 186 187 self.x_cl: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex) 188 189 self.sig: np.ndarray = np.zeros((self.M,), dtype=float) 190 191 self._xx_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float) 192 self._ee_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float) 193 194 self._x_state: np.ndarray = np.zeros((max(self._full_len - 1, 0),), dtype=float) 195 196 self.w_history = [] 197 self._record_history() 198 199 def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None: 200 """ 201 Reset coefficients and history. 202 203 - If w_new is provided: 204 * If shape (M, Nw+1): interpreted as subband coefficients. 205 * If flat of length M*(Nw+1): reshaped as subband coefficients. 206 - Resets internal states (x_cl, sig, fractional-delay, FIR state). 207 """ 208 if w_new is None: 209 self.w_sb = np.zeros((self.M, self.Nw + 1), dtype=complex) 210 else: 211 w0 = np.asarray(w_new) 212 if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1): 213 self.w_sb = w0.astype(complex, copy=True) 214 else: 215 w0 = w0.reshape(-1) 216 if w0.size != self.M * (self.Nw + 1): 217 raise ValueError( 218 f"w_new has incompatible size. Expected {self.M*(self.Nw+1)} " 219 f"or shape ({self.M},{self.Nw+1}), got {w0.size}." 
220 ) 221 self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True) 222 223 self.x_cl = np.zeros((self.M, self.Nw + 1), dtype=complex) 224 self.sig = np.zeros((self.M,), dtype=float) 225 self._xx_frac = np.zeros((self._P, self.M), dtype=float) 226 self._ee_frac = np.zeros((self._P, self.M), dtype=float) 227 self._x_state = np.zeros((max(self._full_len - 1, 0),), dtype=float) 228 229 GG = self._equivalent_fullband() 230 self.w = GG.astype(float, copy=True) 231 self.w_history = [] 232 self._record_history() 233 234 def _equivalent_fullband(self) -> np.ndarray: 235 """ 236 Build the equivalent fullband FIR GG (length M*Nw) from current subband coefficients, 237 matching the MATLAB mapping. 238 239 Returns 240 ------- 241 GG : np.ndarray, shape (M*Nw,), dtype=float 242 """ 243 ww = np.real(self.F.conj().T @ self.w_sb) / float(self.M) 244 245 G = np.zeros((self.M, self.Nw), dtype=float) 246 G[0, :] = ww[0, : self.Nw] 247 248 for m in range(1, self.M): 249 aux = np.convolve(self.Ed[m - 1, :], ww[m, :], mode="full") 250 start = self._Dint + 1 251 stop = start + self.Nw 252 G[m, :] = aux[start:stop] 253 254 GG = G.reshape(-1, order="F") 255 return GG 256 257 def _fir_block(self, b: np.ndarray, x_block: np.ndarray) -> np.ndarray: 258 """ 259 FIR filtering with state, matching MATLAB `filter(b,1,x,zi)` block-by-block. 
260 """ 261 Lb = int(b.size) 262 if Lb == 0: 263 return np.zeros_like(x_block, dtype=float) 264 if Lb == 1: 265 return float(b[0]) * x_block 266 267 y = np.zeros_like(x_block, dtype=float) 268 state = self._x_state 269 270 for i, x_n in enumerate(x_block): 271 acc = float(b[0]) * float(x_n) 272 if Lb > 1 and state.size > 0: 273 acc += float(np.dot(b[1:], state[: Lb - 1])) 274 y[i] = acc 275 276 if state.size > 0: 277 state[1:] = state[:-1] 278 state[0] = float(x_n) 279 280 self._x_state = state 281 return y 282 283 @ensure_real_signals 284 @validate_input 285 def optimize( 286 self, 287 input_signal: np.ndarray, 288 desired_signal: np.ndarray, 289 verbose: bool = False, 290 return_internal_states: bool = False, 291 ) -> OptimizationResult: 292 """ 293 Run DLCLLMS adaptation block-by-block. 294 295 Parameters 296 ---------- 297 input_signal : array_like of float 298 Fullband input x[n], shape (N,). 299 desired_signal : array_like of float 300 Fullband desired d[n], shape (N,). 301 verbose : bool, default=False 302 If True, prints runtime and block stats. 303 return_internal_states : bool, default=False 304 If True, returns additional internal trajectories in result.extra. 305 306 Returns 307 ------- 308 OptimizationResult 309 outputs : ndarray of float, shape (N,) 310 Estimated fullband output y[n]. Only the first `n_used` samples are 311 produced by block processing; remaining tail (if any) is zero. 312 errors : ndarray of float, shape (N,) 313 Fullband error e[n] = d[n] - y[n]. Tail (if any) equals d[n] there. 314 coefficients : ndarray 315 History of equivalent fullband FIR vectors GG (length M*Nw), stored 316 once per processed block (plus initial entry). 317 error_type : str 318 "output_error". 
319 320 extra : dict 321 Always contains: 322 - "n_blocks": number of processed blocks 323 - "block_len": block length (equals M) 324 - "n_used": number of processed samples (multiple of M) 325 If return_internal_states=True, also contains: 326 - "sig_history": ndarray (n_blocks, M) of smoothed subband power 327 - "w_sb_final": final subband coefficient matrix (M, Nw+1) 328 """ 329 tic: float = time() 330 331 x = np.asarray(input_signal, dtype=float).ravel() 332 d = np.asarray(desired_signal, dtype=float).ravel() 333 334 n_samples: int = int(x.size) 335 M: int = int(self.M) 336 L: int = M 337 338 n_blocks: int = int(n_samples // L) 339 n_used: int = int(n_blocks * L) 340 341 outputs = np.zeros((n_samples,), dtype=float) 342 errors = np.zeros((n_samples,), dtype=float) 343 344 sig_hist: Optional[np.ndarray] = np.zeros((n_blocks, M), dtype=float) if return_internal_states else None 345 346 self.w_history = [] 347 self._record_history() 348 349 if n_blocks == 0: 350 errors = d - outputs 351 runtime_s: float = float(time() - tic) 352 extra: Dict[str, Any] = {"n_blocks": 0, "block_len": L, "n_used": 0} 353 return self._pack_results( 354 outputs=outputs, 355 errors=errors, 356 runtime_s=runtime_s, 357 error_type="output_error", 358 extra=extra, 359 ) 360 361 for k in range(n_blocks): 362 i0 = k * L 363 i1 = i0 + L 364 365 x_block = x[i0:i1] 366 d_block = d[i0:i1] 367 368 x_p = x_block[::-1] 369 370 x_frac = np.zeros((M,), dtype=float) 371 for m in range(M): 372 self._xx_frac[1:, m] = self._xx_frac[:-1, m] 373 self._xx_frac[0, m] = x_p[m] 374 x_frac[m] = float(np.dot(self.Ed[m, :], self._xx_frac[:, m])) 375 376 xsb = self.F @ x_frac.astype(complex) 377 378 GG = self._equivalent_fullband() 379 y_block = self._fir_block(GG, x_block) 380 381 outputs[i0:i1] = y_block 382 e_block = d_block - y_block 383 errors[i0:i1] = e_block 384 385 self.w = GG.astype(float, copy=True) 386 self._record_history() 387 388 e_p = e_block[::-1] 389 e_frac = np.zeros((M,), dtype=float) 390 for m 
in range(M): 391 self._ee_frac[1:, m] = self._ee_frac[:-1, m] 392 self._ee_frac[0, m] = e_p[m] 393 e_frac[m] = float(np.dot(self.Ed[m, :], self._ee_frac[:, m])) 394 395 esb = self.F @ e_frac.astype(complex) 396 397 for m in range(M): 398 self.x_cl[m, 1:] = self.x_cl[m, :-1] 399 self.x_cl[m, 0] = xsb[m] 400 401 self.sig[m] = (1.0 - self.a) * self.sig[m] + self.a * (np.abs(xsb[m]) ** 2) 402 403 mu_n = self.step_size / (self.gamma + (self.Nw + 1) * self.sig[m]) 404 405 self.w_sb[m, :] = self.w_sb[m, :] + 2.0 * mu_n * np.conj(esb[m]) * self.x_cl[m, :] 406 407 if return_internal_states and sig_hist is not None: 408 sig_hist[k, :] = self.sig 409 410 if n_used < n_samples: 411 outputs[n_used:] = 0.0 412 errors[n_used:] = d[n_used:] - outputs[n_used:] 413 414 runtime_s: float = float(time() - tic) 415 if verbose: 416 print(f"[DLCLLMS] Completed in {runtime_s * 1000:.03f} ms | blocks={n_blocks} | used={n_used}/{n_samples}") 417 418 extra: Dict[str, Any] = { 419 "n_blocks": int(n_blocks), 420 "block_len": int(L), 421 "n_used": int(n_used), 422 } 423 if return_internal_states: 424 extra.update( 425 { 426 "sig_history": sig_hist, 427 "w_sb_final": self.w_sb.copy(), 428 } 429 ) 430 431 return self._pack_results( 432 outputs=outputs, 433 errors=errors, 434 runtime_s=runtime_s, 435 error_type="output_error", 436 extra=extra, 437 )
Delayless Closed-Loop Subband LMS (DLCLLMS) for real-valued fullband signals.
Implements the Delayless Closed-Loop Subband LMS adaptive filtering algorithm (Algorithm 12.3, Diniz) using:
- a DFT analysis bank (complex subband signals),
- a polyphase Nyquist / fractional-delay prototype (Ed) to realize the delayless closed-loop structure,
- and an equivalent fullband FIR mapping (GG) used to generate the output in the time domain.
High-level operation (as implemented)
Processing is block-based with block length: L = M (M = number of subbands / DFT size)
For each block k:
1) Form a reversed block x_p and pass each sample through a per-branch fractional-delay
structure (polyphase) driven by Ed, producing x_frac (length M).
2) Compute subband input:
x_sb = F @ x_frac
where F is the (non-unitary) DFT matrix (MATLAB dftmtx convention).
3) Map current subband coefficients to an equivalent fullband FIR:
GG = equivalent_fullband(w_sb)
and filter the fullband input block through GG (with state) to produce y_block.
4) Compute fullband error e_block = d_block - y_block.
5) Pass the reversed error block through the same fractional-delay structure to get e_frac,
then compute subband error:
e_sb = F @ e_frac
6) Update subband coefficients with an LMS-like recursion using a subband delay line x_cl
and a smoothed power estimate sig[m]:
sig[m] = (1-a) sig[m] + a |x_sb[m]|^2
mu_n = step / (gamma + (Nw+1) * sig[m])
w_sb[m,:] <- w_sb[m,:] + 2 * mu_n * conj(e_sb[m]) * x_cl[m,:]
Coefficient representation and mapping
Subband coefficients are stored in: w_sb : complex ndarray, shape (M, Nw+1)
For output synthesis and for the base API, an equivalent fullband FIR is built: GG : real ndarray, length (M*Nw)
The mapping matches the provided MATLAB logic:
- Compute ww = real(F^H w_sb) / M
- For branch m=0: take ww[0, :Nw]
- For m>=1: convolve ww[m,:] with Ed[m-1,:] and extract a length-Nw segment starting at (Dint+1), where Dint=(P-1)//2 and P is the polyphase length.
The base-class coefficient vector `self.w` stores GG (as a float array), and `OptimizationResult.coefficients` contains the history of GG, recorded once per block (plus the initial entry).
Parameters
filter_order : int, default=5 Subband filter order Nw (number of taps per subband delay line is Nw+1). n_subbands : int, default=4 Number of subbands M (DFT size). Also equals the processing block length L. step_size : float, default=0.1 Global LMS step size. gamma : float, default=1e-2 Regularization constant in the normalized step denominator (>0 recommended). a : float, default=1e-2 Exponential smoothing factor for subband power sig in (0,1]. nyquist_len : int, default=2 Length Nfd of the Nyquist (fractional-delay) prototype used to build Ed. w_init : array_like, optional Initial subband coefficient matrix. Can be either: - shape (M, Nw+1), or - flat length M*(Nw+1), reshaped internally.
Notes
- Real-valued interface (input_signal and desired_signal enforced real). Internal computations use complex subband signals.
- This implementation processes only `n_used = floor(N/M) * M` samples. Any tail samples (N - n_used) are left with output = 0 and error = d in that region.
- The reported `error_type` is "output_error" (the fullband output-error sequence).
def __init__(
    self,
    filter_order: int = 5,
    n_subbands: int = 4,
    step_size: float = 0.1,
    gamma: float = 1e-2,
    a: float = 1e-2,
    nyquist_len: int = 2,
    w_init: Optional[ArrayLike] = None,
) -> None:
    """
    Initialize the Delayless Closed-Loop Subband LMS (DLCLLMS) filter.

    Parameters
    ----------
    filter_order : int, default=5
        Subband filter order Nw (each subband delay line has Nw+1 taps).
    n_subbands : int, default=4
        Number of subbands M (DFT size; also the processing block length).
    step_size : float, default=0.1
        Global LMS step size.
    gamma : float, default=1e-2
        Regularization constant in the normalized step-size denominator.
    a : float, default=1e-2
        Exponential smoothing factor for the subband power estimate.
    nyquist_len : int, default=2
        Length of the Nyquist (fractional-delay) prototype used to build Ed.
    w_init : array_like, optional
        Initial subband coefficients, either of shape (M, Nw+1) or flat of
        length M*(Nw+1) (reshaped internally).

    Raises
    ------
    ValueError
        If n_subbands, filter_order, or nyquist_len is non-positive, or if
        w_init has an incompatible size.
    """
    self.M: int = int(n_subbands)
    if self.M <= 0:
        raise ValueError("n_subbands must be a positive integer.")

    self.Nw: int = int(filter_order)
    if self.Nw <= 0:
        raise ValueError("filter_order must be a positive integer.")

    self.step_size: float = float(step_size)
    self.gamma: float = float(gamma)
    self.a: float = float(a)

    self.nyquist_len: int = int(nyquist_len)
    if self.nyquist_len <= 0:
        raise ValueError("nyquist_len must be a positive integer.")

    # Length of the equivalent fullband FIR (GG) built from the subband taps.
    self._full_len: int = int(self.M * self.Nw)

    # Base class tracks the equivalent fullband vector `self.w` (M*Nw taps).
    super().__init__(filter_order=self._full_len - 1, w_init=None)

    # Polyphase Nyquist / fractional-delay bank Ed: one row per branch.
    self.Ed: np.ndarray = _design_polyphase_nyquist_bank(self.M, self.nyquist_len)
    self._P: int = int(self.Ed.shape[1])  # polyphase (fractional-delay) length
    self._Dint: int = int((self._P - 1) // 2)  # integer delay used in the fullband mapping

    # DFT analysis matrix (MATLAB dftmtx convention, per module docs).
    self.F: np.ndarray = dft_matrix(self.M)

    # Subband coefficient matrix: one row of Nw+1 complex taps per subband.
    self.w_sb: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex)
    if w_init is not None:
        w0 = np.asarray(w_init)
        if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
            self.w_sb = w0.astype(complex, copy=True)
        else:
            # Accept a flat vector of length M*(Nw+1) and reshape row-major.
            w0 = w0.reshape(-1)
            if w0.size != self.M * (self.Nw + 1):
                raise ValueError(
                    f"w_init has incompatible size. Expected {self.M*(self.Nw+1)} "
                    f"or shape ({self.M},{self.Nw+1}), got {w0.size}."
                )
            self.w_sb = w0.reshape((self.M, self.Nw + 1)).astype(complex, copy=True)

    # Per-subband closed-loop regressor delay line.
    self.x_cl: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=complex)

    # Smoothed subband power estimates (one per subband).
    self.sig: np.ndarray = np.zeros((self.M,), dtype=float)

    # Fractional-delay states for the input (_xx_frac) and error (_ee_frac) paths.
    self._xx_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)
    self._ee_frac: np.ndarray = np.zeros((self._P, self.M), dtype=float)

    # FIR state for block filtering with the equivalent fullband filter.
    self._x_state: np.ndarray = np.zeros((max(self._full_len - 1, 0),), dtype=float)

    self.w_history = []
    self._record_history()
def reset_filter(self, w_new: Optional[Union[np.ndarray, list]] = None) -> None:
    """
    Reset coefficients and all internal state, restarting the history log.

    - If w_new is provided:
        * shape (M, Nw+1) is taken as the subband coefficient matrix;
        * a flat vector of length M*(Nw+1) is reshaped into that matrix.
    - If w_new is None, subband coefficients are zeroed.
    - Internal states (x_cl, sig, fractional-delay lines, FIR state) are cleared.

    Raises
    ------
    ValueError
        If w_new matches neither accepted layout.
    """
    n_taps = self.Nw + 1

    if w_new is None:
        self.w_sb = np.zeros((self.M, n_taps), dtype=complex)
    else:
        candidate = np.asarray(w_new)
        if candidate.ndim == 2 and candidate.shape == (self.M, n_taps):
            self.w_sb = candidate.astype(complex, copy=True)
        else:
            candidate = candidate.reshape(-1)
            if candidate.size != self.M * n_taps:
                raise ValueError(
                    f"w_new has incompatible size. Expected {self.M*(self.Nw+1)} "
                    f"or shape ({self.M},{self.Nw+1}), got {candidate.size}."
                )
            self.w_sb = candidate.reshape((self.M, n_taps)).astype(complex, copy=True)

    # Clear every running state so the next optimize() starts fresh.
    self.x_cl = np.zeros((self.M, n_taps), dtype=complex)
    self.sig = np.zeros((self.M,), dtype=float)
    self._xx_frac = np.zeros((self._P, self.M), dtype=float)
    self._ee_frac = np.zeros((self._P, self.M), dtype=float)
    self._x_state = np.zeros((max(self._full_len - 1, 0),), dtype=float)

    # Re-derive the equivalent fullband FIR and restart the history log.
    fullband = self._equivalent_fullband()
    self.w = fullband.astype(float, copy=True)
    self.w_history = []
    self._record_history()
Reset coefficients and history.
- If w_new is provided:
- If shape (M, Nw+1): interpreted as subband coefficients.
- If flat of length M*(Nw+1): reshaped as subband coefficients.
- Resets internal states (x_cl, sig, fractional-delay, FIR state).
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run DLCLLMS adaptation block-by-block.

    Parameters
    ----------
    input_signal : array_like of float
        Fullband input x[n], shape (N,).
    desired_signal : array_like of float
        Fullband desired d[n], shape (N,).
    verbose : bool, default=False
        If True, prints runtime and block stats.
    return_internal_states : bool, default=False
        If True, returns additional internal trajectories in result.extra.

    Returns
    -------
    OptimizationResult
        outputs : ndarray of float, shape (N,)
            Estimated fullband output y[n]. Only the first `n_used` samples are
            produced by block processing; remaining tail (if any) is zero.
        errors : ndarray of float, shape (N,)
            Fullband error e[n] = d[n] - y[n]. Tail (if any) equals d[n] there.
        coefficients : ndarray
            History of equivalent fullband FIR vectors GG (length M*Nw), stored
            once per processed block (plus initial entry).
        error_type : str
            "output_error".

        extra : dict
            Always contains:
              - "n_blocks": number of processed blocks
              - "block_len": block length (equals M)
              - "n_used": number of processed samples (multiple of M)
            If return_internal_states=True, also contains:
              - "sig_history": ndarray (n_blocks, M) of smoothed subband power
              - "w_sb_final": final subband coefficient matrix (M, Nw+1)
    """
    tic: float = time()

    x = np.asarray(input_signal, dtype=float).ravel()
    d = np.asarray(desired_signal, dtype=float).ravel()

    n_samples: int = int(x.size)
    M: int = int(self.M)
    L: int = M  # block length equals the number of subbands

    # Only whole blocks are processed; the tail is handled after the loop.
    n_blocks: int = int(n_samples // L)
    n_used: int = int(n_blocks * L)

    outputs = np.zeros((n_samples,), dtype=float)
    errors = np.zeros((n_samples,), dtype=float)

    sig_hist: Optional[np.ndarray] = np.zeros((n_blocks, M), dtype=float) if return_internal_states else None

    # Restart the coefficient history for this run (includes initial entry).
    self.w_history = []
    self._record_history()

    if n_blocks == 0:
        # Input shorter than one block: nothing processed, error equals d.
        errors = d - outputs
        runtime_s: float = float(time() - tic)
        extra: Dict[str, Any] = {"n_blocks": 0, "block_len": L, "n_used": 0}
        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )

    for k in range(n_blocks):
        i0 = k * L
        i1 = i0 + L

        x_block = x[i0:i1]
        d_block = d[i0:i1]

        # Reversed block feeds the per-branch fractional-delay structures.
        x_p = x_block[::-1]

        x_frac = np.zeros((M,), dtype=float)
        for m in range(M):
            # Shift the branch state and push the newest sample.
            self._xx_frac[1:, m] = self._xx_frac[:-1, m]
            self._xx_frac[0, m] = x_p[m]
            x_frac[m] = float(np.dot(self.Ed[m, :], self._xx_frac[:, m]))

        # DFT analysis: subband input samples for this block.
        xsb = self.F @ x_frac.astype(complex)

        # Delayless output: filter the fullband block through the equivalent
        # fullband FIR derived from the current subband coefficients.
        GG = self._equivalent_fullband()
        y_block = self._fir_block(GG, x_block)

        outputs[i0:i1] = y_block
        e_block = d_block - y_block
        errors[i0:i1] = e_block

        # Record the fullband coefficients once per block.
        self.w = GG.astype(float, copy=True)
        self._record_history()

        # Same fractional-delay path applied to the reversed error block.
        e_p = e_block[::-1]
        e_frac = np.zeros((M,), dtype=float)
        for m in range(M):
            self._ee_frac[1:, m] = self._ee_frac[:-1, m]
            self._ee_frac[0, m] = e_p[m]
            e_frac[m] = float(np.dot(self.Ed[m, :], self._ee_frac[:, m]))

        # DFT analysis: subband error samples (closed-loop feedback path).
        esb = self.F @ e_frac.astype(complex)

        for m in range(M):
            # Update the subband regressor delay line.
            self.x_cl[m, 1:] = self.x_cl[m, :-1]
            self.x_cl[m, 0] = xsb[m]

            # Exponentially smoothed subband input power.
            self.sig[m] = (1.0 - self.a) * self.sig[m] + self.a * (np.abs(xsb[m]) ** 2)

            # Power-normalized step size, regularized by gamma.
            mu_n = self.step_size / (self.gamma + (self.Nw + 1) * self.sig[m])

            # Complex LMS update of the subband taps.
            self.w_sb[m, :] = self.w_sb[m, :] + 2.0 * mu_n * np.conj(esb[m]) * self.x_cl[m, :]

        if return_internal_states and sig_hist is not None:
            sig_hist[k, :] = self.sig

    if n_used < n_samples:
        # Tail beyond the last whole block: output stays 0, error equals d.
        outputs[n_used:] = 0.0
        errors[n_used:] = d[n_used:] - outputs[n_used:]

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[DLCLLMS] Completed in {runtime_s * 1000:.03f} ms | blocks={n_blocks} | used={n_used}/{n_samples}")

    extra: Dict[str, Any] = {
        "n_blocks": int(n_blocks),
        "block_len": int(L),
        "n_used": int(n_used),
    }
    if return_internal_states:
        extra.update(
            {
                "sig_history": sig_hist,
                "w_sb_final": self.w_sb.copy(),
            }
        )

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="output_error",
        extra=extra,
    )
Run DLCLLMS adaptation block-by-block.
Parameters
input_signal : array_like of float Fullband input x[n], shape (N,). desired_signal : array_like of float Fullband desired d[n], shape (N,). verbose : bool, default=False If True, prints runtime and block stats. return_internal_states : bool, default=False If True, returns additional internal trajectories in result.extra.
Returns
OptimizationResult
outputs : ndarray of float, shape (N,)
Estimated fullband output y[n]. Only the first n_used samples are
produced by block processing; remaining tail (if any) is zero.
errors : ndarray of float, shape (N,)
Fullband error e[n] = d[n] - y[n]. Tail (if any) equals d[n] there.
coefficients : ndarray
History of equivalent fullband FIR vectors GG (length M*Nw), stored
once per processed block (plus initial entry).
error_type : str
"output_error".
extra : dict
Always contains:
- "n_blocks": number of processed blocks
- "block_len": block length (equals M)
- "n_used": number of processed samples (multiple of M)
If return_internal_states=True, also contains:
- "sig_history": ndarray (n_blocks, M) of smoothed subband power
- "w_sb_final": final subband coefficient matrix (M, Nw+1)
class OLSBLMS(AdaptiveFilter):
    """
    Open-Loop Subband LMS (OLSBLMS) for real-valued fullband signals.

    Implements the Open-Loop Subband LMS adaptive filtering algorithm
    (Algorithm 12.1, Diniz) using an analysis/synthesis filterbank with
    subband-adaptive FIR filters.

    High-level operation (as implemented)
    -------------------------------------
    Given fullband input x[n] and desired d[n], and an M-channel analysis bank h_k[m],
    the algorithm proceeds in two stages:

    (A) Analysis + Decimation (open-loop)
        For each subband m = 0..M-1:
          - Filter the fullband input and desired with the analysis filter:
                x_aux[m] = filter(hk[m], 1, x)
                d_aux[m] = filter(hk[m], 1, d)
          - Decimate by L (keep samples 0, L, 2L, ...):
                x_sb[m] = x_aux[m][::L]
                d_sb[m] = d_aux[m][::L]

        The adaptation length is:
            N_iter = min_m len(x_sb[m]) and len(d_sb[m])
        (i.e., all subbands are truncated to the shortest decimated sequence).

    (B) Subband LMS adaptation (per-sample in decimated time)
        Each subband has its own tapped-delay line x_ol[m,:] of length (Nw+1) and
        its own coefficient vector w_mat[m,:] (also length Nw+1).

        For each decimated-time index k = 0..N_iter-1, and for each subband m:
          - Update subband delay line:
                x_ol[m,0] = x_sb[m,k]
          - Compute subband output and error:
                y_sb[m,k] = w_mat[m]^T x_ol[m]
                e_sb[m,k] = d_sb[m,k] - y_sb[m,k]
          - Update a smoothed subband energy estimate:
                sig_ol[m] = (1-a) sig_ol[m] + a * x_sb[m,k]^2
          - Normalized LMS-like step:
                mu_m = (step) / (gamma + (Nw+1)*sig_ol[m])
          - Coefficient update:
                w_mat[m] <- w_mat[m] + mu_m * e_sb[m,k] * x_ol[m]

    Fullband reconstruction (convenience synthesis)
    ----------------------------------------------
    After adaptation, a fullband output is reconstructed via the synthesis bank f_k[m]:
      - Upsample each subband output by L (zero-stuffing), then filter:
            y_up[m] = upsample(y_sb[m], L)
            y_full[m] = filter(fk[m], 1, y_up[m])
      - Sum across subbands:
            y[n] = sum_m y_full[m][n]
    The returned error is the fullband output error e[n] = d[n] - y[n].

    Coefficient representation and history
    --------------------------------------
    - The adaptive parameters are stored as:
          w_mat : ndarray, shape (M, Nw+1), dtype=float
    - For compatibility with the base class, `self.w` is a flattened view of w_mat
      (row-major), and `OptimizationResult.coefficients` contains the stacked history
      of this flattened vector (recorded once per decimated-time iteration, plus the
      initial entry).
    - The full (M, Nw+1) snapshots are also stored in `extra["w_matrix_history"]`.

    Parameters
    ----------
    n_subbands : int
        Number of subbands (M).
    analysis_filters : array_like
        Analysis bank hk with shape (M, Lh).
    synthesis_filters : array_like
        Synthesis bank fk with shape (M, Lf).
    filter_order : int
        Subband FIR order Nw (number of taps per subband is Nw+1).
    step_size : float, default=0.1
        Global LMS step-size factor.
    gamma : float, default=1e-2
        Regularization term in the normalized denominator (>0 recommended).
    a : float, default=0.01
        Exponential smoothing factor for subband energy estimates in (0,1].
    decimation_factor : int, optional
        Decimation factor L. If None, uses L=M.
    w_init : array_like, optional
        Initial subband coefficients. Can be:
          - shape (M, Nw+1), or
          - flat of length M*(Nw+1), reshaped row-major.

    Notes
    -----
    - Real-valued interface (input_signal and desired_signal enforced real).
    - This is an *open-loop* structure: subband regressors are formed from the
      analysis-filtered fullband input, independent of any reconstructed fullband
      output loop.
    - Subband MSE curves are provided as `mse_subbands = e_sb**2` and
      `mse_overall = mean_m mse_subbands[m,k]`.

    """
    supports_complex: bool = False

    # Primary hyperparameters (set in __init__).
    M: int
    Nw: int
    L: int
    step_size: float
    gamma: float
    a: float

    def __init__(
        self,
        n_subbands: int,
        analysis_filters: ArrayLike,
        synthesis_filters: ArrayLike,
        filter_order: int,
        step_size: float = 0.1,
        gamma: float = 1e-2,
        a: float = 0.01,
        decimation_factor: Optional[int] = None,
        w_init: Optional[ArrayLike] = None,
    ) -> None:
        """
        Initialize the OLSBLMS filter (see class docstring for parameters).

        Raises
        ------
        ValueError
            On non-positive M or L, negative Nw, non-2D filterbanks,
            filterbanks without M rows, or an incompatible w_init.
        """
        self.M = int(n_subbands)
        if self.M <= 0:
            raise ValueError("n_subbands must be a positive integer.")

        self.Nw = int(filter_order)
        if self.Nw < 0:
            raise ValueError("filter_order must be a Non-negative integer.")

        self.step_size = float(step_size)
        self.gamma = float(gamma)
        self.a = float(a)

        hk = np.asarray(analysis_filters, dtype=float)
        fk = np.asarray(synthesis_filters, dtype=float)

        # Both banks must be (M, Lh) / (M, Lf): one FIR row per subband.
        if hk.ndim != 2 or fk.ndim != 2:
            raise ValueError("analysis_filters and synthesis_filters must be 2D arrays with shape (M, Lh/Lf).")
        if hk.shape[0] != self.M or fk.shape[0] != self.M:
            raise ValueError(
                f"Filterbanks must have M rows. Got hk.shape[0]={hk.shape[0]}, fk.shape[0]={fk.shape[0]}, M={self.M}."
            )

        self.hk = hk
        self.fk = fk

        # Decimation factor defaults to critical sampling (L = M).
        self.L = int(decimation_factor) if decimation_factor is not None else self.M
        if self.L <= 0:
            raise ValueError("decimation_factor L must be a positive integer.")

        # Base class tracks the flattened coefficient vector of M*(Nw+1) taps.
        self._n_params = int(self.M * (self.Nw + 1))
        super().__init__(filter_order=self._n_params - 1, w_init=None)

        # Subband coefficient matrix: one row of Nw+1 real taps per subband.
        self.w_mat: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=float)
        if w_init is not None:
            w0 = np.asarray(w_init, dtype=float)
            if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
                self.w_mat = w0.copy()
            elif w0.ndim == 1 and w0.size == self._n_params:
                self.w_mat = w0.reshape(self.M, self.Nw + 1).copy()
            else:
                raise ValueError(
                    "w_init must have shape (M, Nw+1) or be a flat vector of length M*(Nw+1). "
                    f"Got w_init.shape={w0.shape}."
                )

        self.w = self.w_mat.reshape(-1).astype(float, copy=False)
        self.w_history = []
        self._record_history()

        # Per-iteration snapshots of the full (M, Nw+1) matrix.
        self.w_matrix_history: list[np.ndarray] = []

    def _sync_base_w(self) -> None:
        """Keep base `self.w` consistent with the subband matrix."""
        self.w = self.w_mat.reshape(-1).astype(float, copy=False)

    @classmethod
    def default_test_init_kwargs(cls, order: int) -> dict:
        """Default constructor kwargs (single-band identity filterbank, L=1) for generic tests."""
        M = 1
        hk = np.array([[1.0]], dtype=float)
        fk = np.array([[1.0]], dtype=float)
        return dict(
            n_subbands=M,
            analysis_filters=hk,
            synthesis_filters=fk,
            filter_order=order,
            step_size=0.1,
            gamma=1e-2,
            a=0.01,
            decimation_factor=1,
        )

    @ensure_real_signals
    @validate_input
    def optimize(
        self,
        input_signal: np.ndarray,
        desired_signal: np.ndarray,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run OLSBLMS adaptation.

        Parameters
        ----------
        input_signal : array_like of float
            Fullband input x[n], shape (N,).
        desired_signal : array_like of float
            Fullband desired d[n], shape (N,).
        verbose : bool, default=False
            If True, prints runtime and iteration count.
        return_internal_states : bool, default=False
            If True, returns additional internal states in result.extra.

        Returns
        -------
        OptimizationResult
            outputs : ndarray of float, shape (N,)
                Fullband reconstructed output y[n] obtained by synthesis of the
                subband outputs after adaptation.
            errors : ndarray of float, shape (N,)
                Fullband output error e[n] = d[n] - y[n].
            coefficients : ndarray
                Flattened coefficient history of w_mat, shape
                (#snapshots, M*(Nw+1)), where snapshots are recorded once per
                subband-iteration (decimated-time step), plus the initial entry.
            error_type : str
                "output_error".

            extra : dict
                Always contains:
                  - "w_matrix_history": list of (M, Nw+1) coefficient snapshots
                  - "subband_outputs": ndarray (M, N_iter)
                  - "subband_errors": ndarray (M, N_iter)
                  - "mse_subbands": ndarray (M, N_iter) with e_sb**2
                  - "mse_overall": ndarray (N_iter,) mean subband MSE per iteration
                If return_internal_states=True, also contains:
                  - "sig_ol": final subband energy estimates, shape (M,)
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=float).ravel()
        d = np.asarray(desired_signal, dtype=float).ravel()

        n_samples: int = int(x.size)

        # Stage (A): analysis filtering + decimation of both x and d.
        xsb_list: list[np.ndarray] = []
        dsb_list: list[np.ndarray] = []
        for m in range(self.M):
            xaux_x = fir_filter_causal(self.hk[m, :], x)
            xaux_d = fir_filter_causal(self.hk[m, :], d)
            xsb_list.append(_decimate_by_L(xaux_x, self.L))
            dsb_list.append(_decimate_by_L(xaux_d, self.L))

        # Truncate all subbands to the shortest decimated sequence.
        N_iter: int = min(arr.size for arr in (xsb_list + dsb_list)) if (xsb_list and dsb_list) else 0
        if N_iter == 0:
            # Nothing to adapt: return zero output and empty trajectories.
            y0 = np.zeros_like(d)
            runtime_s = float(time() - tic)
            return self._pack_results(
                outputs=y0,
                errors=d - y0,
                runtime_s=runtime_s,
                error_type="output_error",
                extra={
                    "w_matrix_history": [],
                    "subband_outputs": np.zeros((self.M, 0), dtype=float),
                    "subband_errors": np.zeros((self.M, 0), dtype=float),
                    "mse_subbands": np.zeros((self.M, 0), dtype=float),
                    "mse_overall": np.zeros((0,), dtype=float),
                },
            )

        xsb = np.vstack([arr[:N_iter] for arr in xsb_list])
        dsb = np.vstack([arr[:N_iter] for arr in dsb_list])

        y_sb = np.zeros((self.M, N_iter), dtype=float)
        e_sb = np.zeros((self.M, N_iter), dtype=float)

        # Per-subband tapped-delay lines and smoothed energy estimates.
        x_ol = np.zeros((self.M, self.Nw + 1), dtype=float)
        sig_ol = np.zeros((self.M,), dtype=float)

        # Restart the coefficient history for this run (initial entry included).
        self.w_history = []
        self._record_history()
        self.w_matrix_history = []

        # Stage (B): independent normalized-LMS update in each subband.
        for k in range(N_iter):
            for m in range(self.M):
                # Shift the subband delay line and push the new sample.
                x_ol[m, 1:] = x_ol[m, :-1]
                x_ol[m, 0] = xsb[m, k]

                y_sb[m, k] = float(np.dot(self.w_mat[m, :], x_ol[m, :]))
                e_sb[m, k] = float(dsb[m, k] - y_sb[m, k])

                # Exponentially smoothed subband input energy.
                sig_ol[m] = (1.0 - self.a) * sig_ol[m] + self.a * (xsb[m, k] ** 2)

                # Energy-normalized step size, regularized by gamma.
                mu_m = (self.step_size) / (self.gamma + (self.Nw + 1) * sig_ol[m])

                self.w_mat[m, :] = self.w_mat[m, :] + mu_m * e_sb[m, k] * x_ol[m, :]

            # One snapshot per decimated-time iteration.
            self.w_matrix_history.append(self.w_mat.copy())
            self._sync_base_w()
            self._record_history()

        # Convenience synthesis: upsample each subband output and sum.
        y_full = np.zeros((n_samples,), dtype=float)
        for m in range(self.M):
            y_up = _upsample_by_L(y_sb[m, :], self.L, n_samples)
            y_full += fir_filter_causal(self.fk[m, :], y_up)

        e_full = d - y_full

        mse_subbands = e_sb ** 2
        mse_overall = np.mean(mse_subbands, axis=0)

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[OLSBLMS] Completed in {runtime_s * 1000:.03f} ms | iters={N_iter}")

        extra: Dict[str, Any] = {
            "w_matrix_history": self.w_matrix_history,
            "subband_outputs": y_sb,
            "subband_errors": e_sb,
            "mse_subbands": mse_subbands,
            "mse_overall": mse_overall,
        }
        if return_internal_states:
            extra["sig_ol"] = sig_ol.copy()

        return self._pack_results(
            outputs=y_full,
            errors=e_full,
            runtime_s=runtime_s,
            error_type="output_error",
            extra=extra,
        )
Open-Loop Subband LMS (OLSBLMS) for real-valued fullband signals.
Implements the Open-Loop Subband LMS adaptive filtering algorithm (Algorithm 12.1, Diniz) using an analysis/synthesis filterbank with subband-adaptive FIR filters.
High-level operation (as implemented)
Given fullband input x[n] and desired d[n], and an M-channel analysis bank h_k[m], the algorithm proceeds in two stages:
(A) Analysis + Decimation (open-loop) For each subband m = 0..M-1: - Filter the fullband input and desired with the analysis filter: x_aux[m] = filter(hk[m], 1, x) d_aux[m] = filter(hk[m], 1, d) - Decimate by L (keep samples 0, L, 2L, ...): x_sb[m] = x_aux[m][::L] d_sb[m] = d_aux[m][::L]
The adaptation length is:
N_iter = min_m len(x_sb[m]) and len(d_sb[m])
(i.e., all subbands are truncated to the shortest decimated sequence).
(B) Subband LMS adaptation (per-sample in decimated time) Each subband has its own tapped-delay line x_ol[m,:] of length (Nw+1) and its own coefficient vector w_mat[m,:] (also length Nw+1).
For each decimated-time index k = 0..N_iter-1, and for each subband m:
- Update subband delay line:
x_ol[m,0] = x_sb[m,k]
- Compute subband output and error:
y_sb[m,k] = w_mat[m]^T x_ol[m]
e_sb[m,k] = d_sb[m,k] - y_sb[m,k]
- Update a smoothed subband energy estimate:
sig_ol[m] = (1-a) sig_ol[m] + a * x_sb[m,k]^2
- Normalized LMS-like step:
mu_m = (step) / (gamma + (Nw+1)*sig_ol[m])
- Coefficient update:
w_mat[m] <- w_mat[m] + mu_m * e_sb[m,k] * x_ol[m]
Fullband reconstruction (convenience synthesis)
After adaptation, a fullband output is reconstructed via the synthesis bank f_k[m]:
- Upsample each subband output by L (zero-stuffing), then filter: y_up[m] = upsample(y_sb[m], L) y_full[m] = filter(fk[m], 1, y_up[m])
- Sum across subbands: y[n] = sum_m y_full[m][n] The returned error is the fullband output error e[n] = d[n] - y[n].
Coefficient representation and history
- The adaptive parameters are stored as: w_mat : ndarray, shape (M, Nw+1), dtype=float
- For compatibility with the base class, `self.w` is a flattened (row-major) view of w_mat, and `OptimizationResult.coefficients` contains the stacked history of this flattened vector (recorded once per decimated-time iteration, plus the initial entry).
- The full (M, Nw+1) snapshots are also stored in `extra["w_matrix_history"]`.
Parameters
n_subbands : int Number of subbands (M). analysis_filters : array_like Analysis bank hk with shape (M, Lh). synthesis_filters : array_like Synthesis bank fk with shape (M, Lf). filter_order : int Subband FIR order Nw (number of taps per subband is Nw+1). step_size : float, default=0.1 Global LMS step-size factor. gamma : float, default=1e-2 Regularization term in the normalized denominator (>0 recommended). a : float, default=0.01 Exponential smoothing factor for subband energy estimates in (0,1]. decimation_factor : int, optional Decimation factor L. If None, uses L=M. w_init : array_like, optional Initial subband coefficients. Can be: - shape (M, Nw+1), or - flat of length M*(Nw+1), reshaped row-major.
Notes
- Real-valued interface (input_signal and desired_signal enforced real).
- This is an open-loop structure: subband regressors are formed from the analysis-filtered fullband input, independent of any reconstructed fullband output loop.
- Subband MSE curves are provided as `mse_subbands = e_sb**2` and `mse_overall = mean_m mse_subbands[m, k]`.
def __init__(
    self,
    n_subbands: int,
    analysis_filters: ArrayLike,
    synthesis_filters: ArrayLike,
    filter_order: int,
    step_size: float = 0.1,
    gamma: float = 1e-2,
    a: float = 0.01,
    decimation_factor: Optional[int] = None,
    w_init: Optional[ArrayLike] = None,
) -> None:
    """
    Initialize the open-loop subband LMS (OLSBLMS) adaptive filter.

    Parameters
    ----------
    n_subbands : int
        Number of subbands M (> 0).
    analysis_filters : array_like
        Analysis bank ``hk`` with shape (M, Lh).
    synthesis_filters : array_like
        Synthesis bank ``fk`` with shape (M, Lf).
    filter_order : int
        Subband FIR order Nw (each subband filter has Nw + 1 taps).
    step_size : float, default=0.1
        Global LMS step-size factor.
    gamma : float, default=1e-2
        Regularization term in the normalized denominator.
    a : float, default=0.01
        Exponential smoothing factor for subband energy estimates.
    decimation_factor : int, optional
        Decimation factor L. Defaults to L = M (critical sampling) when None.
    w_init : array_like, optional
        Initial subband coefficients: shape (M, Nw+1), or a flat vector of
        length M*(Nw+1) reshaped row-major.

    Raises
    ------
    ValueError
        If n_subbands, filter_order, decimation_factor, the filterbank
        shapes, or w_init fail validation.
    """
    self.M = int(n_subbands)
    if self.M <= 0:
        raise ValueError("n_subbands must be a positive integer.")

    self.Nw = int(filter_order)
    if self.Nw < 0:
        # Fixed message capitalization ("Non-negative" -> "non-negative")
        # for consistency with the other error messages in this class.
        raise ValueError("filter_order must be a non-negative integer.")

    self.step_size = float(step_size)
    self.gamma = float(gamma)
    self.a = float(a)

    hk = np.asarray(analysis_filters, dtype=float)
    fk = np.asarray(synthesis_filters, dtype=float)

    if hk.ndim != 2 or fk.ndim != 2:
        raise ValueError("analysis_filters and synthesis_filters must be 2D arrays with shape (M, Lh/Lf).")
    if hk.shape[0] != self.M or fk.shape[0] != self.M:
        raise ValueError(
            f"Filterbanks must have M rows. Got hk.shape[0]={hk.shape[0]}, fk.shape[0]={fk.shape[0]}, M={self.M}."
        )

    self.hk = hk
    self.fk = fk

    # Decimation factor defaults to critical sampling (L = M).
    self.L = int(decimation_factor) if decimation_factor is not None else self.M
    if self.L <= 0:
        raise ValueError("decimation_factor L must be a positive integer.")

    # The base class sees a single flat parameter vector of length M*(Nw+1).
    self._n_params = int(self.M * (self.Nw + 1))
    super().__init__(filter_order=self._n_params - 1, w_init=None)

    self.w_mat: np.ndarray = np.zeros((self.M, self.Nw + 1), dtype=float)
    if w_init is not None:
        w0 = np.asarray(w_init, dtype=float)
        if w0.ndim == 2 and w0.shape == (self.M, self.Nw + 1):
            self.w_mat = w0.copy()
        elif w0.ndim == 1 and w0.size == self._n_params:
            self.w_mat = w0.reshape(self.M, self.Nw + 1).copy()
        else:
            raise ValueError(
                "w_init must have shape (M, Nw+1) or be a flat vector of length M*(Nw+1). "
                f"Got w_init.shape={w0.shape}."
            )

    # Keep self.w as a flattened (row-major) view of w_mat so the base
    # class records coefficient history in its standard flat format.
    self.w = self.w_mat.reshape(-1).astype(float, copy=False)
    self.w_history = []
    self._record_history()

    self.w_matrix_history: list[np.ndarray] = []
212 @classmethod 213 def default_test_init_kwargs(cls, order: int) -> dict: 214 M = 1 215 hk = np.array([[1.0]], dtype=float) 216 fk = np.array([[1.0]], dtype=float) 217 return dict( 218 n_subbands=M, 219 analysis_filters=hk, 220 synthesis_filters=fk, 221 filter_order=order, 222 step_size=0.1, 223 gamma=1e-2, 224 a=0.01, 225 decimation_factor=1, 226 )
Override in subclasses to provide init kwargs for standardized tests.
@ensure_real_signals
@validate_input
def optimize(
    self,
    input_signal: np.ndarray,
    desired_signal: np.ndarray,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Run OLSBLMS adaptation.

    Parameters
    ----------
    input_signal : array_like of float
        Fullband input x[n], shape (N,).
    desired_signal : array_like of float
        Fullband desired d[n], shape (N,).
    verbose : bool, default=False
        If True, prints runtime and iteration count.
    return_internal_states : bool, default=False
        If True, returns additional internal states in result.extra.

    Returns
    -------
    OptimizationResult
        outputs : ndarray of float, shape (N,)
            Fullband reconstructed output y[n] obtained by synthesis of
            the subband outputs after adaptation.
        errors : ndarray of float, shape (N,)
            Fullband output error e[n] = d[n] - y[n].
        coefficients : ndarray
            Flattened coefficient history of w_mat, shape
            (#snapshots, M*(Nw+1)); one snapshot per decimated-time
            iteration plus the initial entry.
        error_type : str
            "output_error".
        extra : dict
            Always contains "w_matrix_history", "subband_outputs",
            "subband_errors", "mse_subbands" (e_sb**2) and "mse_overall"
            (per-iteration mean subband MSE). With
            return_internal_states=True it also has "sig_ol", the final
            subband energy estimates of shape (M,).
    """
    tic: float = time()

    x = np.asarray(input_signal, dtype=float).ravel()
    d = np.asarray(desired_signal, dtype=float).ravel()
    n_samples = int(x.size)

    # Analysis bank followed by decimation, applied to both input and
    # desired signals (open-loop structure).
    x_subband: list[np.ndarray] = []
    d_subband: list[np.ndarray] = []
    for band in range(self.M):
        x_subband.append(_decimate_by_L(fir_filter_causal(self.hk[band, :], x), self.L))
        d_subband.append(_decimate_by_L(fir_filter_causal(self.hk[band, :], d), self.L))

    N_iter = min(arr.size for arr in (x_subband + d_subband)) if (x_subband and d_subband) else 0
    if N_iter == 0:
        # Degenerate case: nothing to adapt on; output is all zeros.
        y0 = np.zeros_like(d)
        return self._pack_results(
            outputs=y0,
            errors=d - y0,
            runtime_s=float(time() - tic),
            error_type="output_error",
            extra={
                "w_matrix_history": [],
                "subband_outputs": np.zeros((self.M, 0), dtype=float),
                "subband_errors": np.zeros((self.M, 0), dtype=float),
                "mse_subbands": np.zeros((self.M, 0), dtype=float),
                "mse_overall": np.zeros((0,), dtype=float),
            },
        )

    xsb = np.vstack([sig[:N_iter] for sig in x_subband])
    dsb = np.vstack([sig[:N_iter] for sig in d_subband])

    y_sb = np.zeros((self.M, N_iter), dtype=float)
    e_sb = np.zeros((self.M, N_iter), dtype=float)

    regressors = np.zeros((self.M, self.Nw + 1), dtype=float)  # per-band tapped delay lines
    energies = np.zeros(self.M, dtype=float)                   # smoothed subband energies

    self.w_history = []
    self._record_history()
    self.w_matrix_history = []

    for k in range(N_iter):
        for m in range(self.M):
            # Shift the delay line; newest decimated sample goes first.
            regressors[m, 1:] = regressors[m, :-1]
            regressors[m, 0] = xsb[m, k]

            y_sb[m, k] = float(np.dot(self.w_mat[m, :], regressors[m, :]))
            e_sb[m, k] = float(dsb[m, k] - y_sb[m, k])

            # Exponentially smoothed energy estimate drives the
            # normalized step size for this subband.
            energies[m] = (1.0 - self.a) * energies[m] + self.a * (xsb[m, k] ** 2)
            mu_m = self.step_size / (self.gamma + (self.Nw + 1) * energies[m])

            self.w_mat[m, :] += mu_m * e_sb[m, k] * regressors[m, :]

        self.w_matrix_history.append(self.w_mat.copy())
        self._sync_base_w()
        self._record_history()

    # Convenience fullband reconstruction: upsample each subband output
    # and run it through the synthesis bank, summing across subbands.
    y_full = np.zeros(n_samples, dtype=float)
    for m in range(self.M):
        y_full += fir_filter_causal(self.fk[m, :], _upsample_by_L(y_sb[m, :], self.L, n_samples))

    e_full = d - y_full

    mse_subbands = e_sb ** 2
    mse_overall = mse_subbands.mean(axis=0)

    runtime_s = float(time() - tic)
    if verbose:
        print(f"[OLSBLMS] Completed in {runtime_s * 1000:.03f} ms | iters={N_iter}")

    extra: Dict[str, Any] = {
        "w_matrix_history": self.w_matrix_history,
        "subband_outputs": y_sb,
        "subband_errors": e_sb,
        "mse_subbands": mse_subbands,
        "mse_overall": mse_overall,
    }
    if return_internal_states:
        extra["sig_ol"] = energies.copy()

    return self._pack_results(
        outputs=y_full,
        errors=e_full,
        runtime_s=runtime_s,
        error_type="output_error",
        extra=extra,
    )
Run OLSBLMS adaptation.
Parameters
input_signal : array_like of float Fullband input x[n], shape (N,). desired_signal : array_like of float Fullband desired d[n], shape (N,). verbose : bool, default=False If True, prints runtime and iteration count. return_internal_states : bool, default=False If True, returns additional internal states in result.extra.
Returns
OptimizationResult outputs : ndarray of float, shape (N,) Fullband reconstructed output y[n] obtained by synthesis of the subband outputs after adaptation. errors : ndarray of float, shape (N,) Fullband output error e[n] = d[n] - y[n]. coefficients : ndarray Flattened coefficient history of w_mat, shape (#snapshots, M*(Nw+1)), where snapshots are recorded once per subband-iteration (decimated-time step), plus the initial entry. error_type : str "output_error".
extra : dict
Always contains:
- "w_matrix_history": list of (M, Nw+1) coefficient snapshots
- "subband_outputs": ndarray (M, N_iter)
- "subband_errors": ndarray (M, N_iter)
- "mse_subbands": ndarray (M, N_iter) with e_sb**2
- "mse_overall": ndarray (N_iter,) mean subband MSE per iteration
If return_internal_states=True, also contains:
- "sig_ol": final subband energy estimates, shape (M,)
class AffineProjectionCM(AdaptiveFilter):
    """
    Complex Affine-Projection Constant-Modulus (AP-CM) adaptive filter.

    Blind affine-projection algorithm built on the constant-modulus
    criterion, after Diniz (Alg. 13.4). The constant-modulus "reference"
    is a unit-modulus projection of the affine-projection output vector,
    i.e. each output component is normalized onto the unit circle.

    Parameters
    ----------
    filter_order : int, optional
        FIR filter order ``M``; the filter holds ``M + 1`` coefficients.
        Default is 5.
    step_size : float, optional
        Step size ``mu``. Default is 0.1.
    memory_length : int, optional
        Data-reuse factor ``L``; the projection block has ``L + 1``
        columns. Default is 2.
    gamma : float, optional
        Regularization added to the ``(L + 1) x (L + 1)`` normal
        equations for numerical stability. Default is 1e-6.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` of shape ``(M + 1,)``;
        zeros when None.

    Notes
    -----
    With ``X(k)`` the ``(M+1) x (L+1)`` block of the most recent
    regressors (newest in column 0):

    .. math::
        y_{ap}(k) = X^H(k) w(k), \\qquad
        d_{ap}(k) = y_{ap}(k) / |y_{ap}(k)| \\ \\text{(element-wise)},

    .. math::
        e_{ap}(k) = d_{ap}(k) - y_{ap}(k), \\qquad
        (X^H X + \\gamma I)\\, g(k) = e_{ap}(k), \\qquad
        w(k+1) = w(k) + \\mu X(k) g(k).

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 13.4.
    """

    supports_complex: bool = True
    step_size: float
    memory_length: int
    gamma: float
    n_coeffs: int

    def __init__(
        self,
        filter_order: int = 5,
        step_size: float = 0.1,
        memory_length: int = 2,
        gamma: float = 1e-6,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order, w_init=w_init)
        self.step_size = float(step_size)
        self.memory_length = int(memory_length)
        self.gamma = float(gamma)
        self.n_coeffs = int(filter_order + 1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
    ) -> OptimizationResult:
        """
        Run the AP-CM adaptation loop over an input sequence.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]``, flattened to shape ``(N,)``.
        desired_signal : None, optional
            Ignored; this blind algorithm derives its reference from the
            output via unit-modulus normalization.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, ``result.extra`` contains ``"last_update_factor"``
            (``g(k)``) and ``"last_regressor_matrix"`` (``X(k)``).

        Returns
        -------
        OptimizationResult
            outputs/errors hold the first component of ``y_ap``/``e_ap``
            per iteration; coefficients is the base-class history;
            error_type is ``"blind_constant_modulus"``.
        """
        tic: float = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        n_samples = int(x.size)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=complex)

        block_cols = int(self.memory_length) + 1
        X = np.zeros((self.n_coeffs, block_cols), dtype=complex)
        reg_eye = (self.gamma * np.eye(block_cols)).astype(complex)

        # Zero-pad so the earliest regressors are well defined.
        padded = np.zeros(n_samples + self.filter_order, dtype=complex)
        padded[self.filter_order:] = x

        g: Optional[np.ndarray] = None

        for k in range(n_samples):
            # Shift the block right; newest regressor enters column 0.
            X[:, 1:] = X[:, :-1]
            X[:, 0] = padded[k : k + self.filter_order + 1][::-1]

            y_ap = np.conj(X).T @ self.w

            # Element-wise unit-circle projection, guarded near zero.
            magnitudes = np.abs(y_ap)
            d_ap = np.zeros_like(y_ap)
            valid = magnitudes > 1e-12
            d_ap[valid] = y_ap[valid] / magnitudes[valid]

            e_ap = d_ap - y_ap

            # Regularized normal equations give the update direction.
            phi = np.conj(X).T @ X + reg_eye
            g = np.linalg.solve(phi, e_ap)

            self.w = self.w + self.step_size * (X @ g)

            outputs[k] = y_ap[0]
            errors[k] = e_ap[0]

            self._record_history()

        runtime_s = float(time() - tic)
        if verbose:
            print(f"[AffineProjectionCM] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "last_update_factor": g,
                "last_regressor_matrix": X.copy(),
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="blind_constant_modulus",
            extra=extra,
        )
Complex Affine-Projection Constant-Modulus (AP-CM) adaptive filter.
Blind affine-projection algorithm based on the constant-modulus criterion, following Diniz (Alg. 13.4). This implementation uses a unit-modulus reference (i.e., target magnitude equal to 1) obtained by normalizing the affine-projection output vector.
Parameters
filter_order : int, optional
Adaptive FIR filter order M. The number of coefficients is M + 1.
Default is 5.
step_size : float, optional
Adaptation step size mu. Default is 0.1.
memory_length : int, optional
Reuse factor L (number of past regressors reused). The affine-
projection block size is therefore P = L + 1 columns. Default is 2.
gamma : float, optional
Levenberg-Marquardt regularization factor gamma used in the
(L + 1) x (L + 1) normal-equation system for numerical stability.
Default is 1e-6.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
At iteration k, form the regressor block matrix:
X(k) ∈ C^{(M+1) x (L+1)}, whose columns are the most recent regressor vectors (newest in column 0).
The affine-projection output vector is:
$$y_{ap}(k) = X^H(k) w(k) \in \mathbb{C}^{L+1}.$$
This implementation uses a unit-circle projection (normalization) as the constant-modulus "reference":
$$d_{ap}(k) = \frac{y_{ap}(k)}{|y_{ap}(k)|},$$
applied element-wise, with a small threshold to avoid division by zero.
The error vector is:
$$e_{ap}(k) = d_{ap}(k) - y_{ap}(k).$$
The update direction g(k) is obtained by solving the regularized system:
$$(X^H(k) X(k) + \gamma I_{L+1})\, g(k) = e_{ap}(k),$$
and the coefficient update is:
$$w(k+1) = w(k) + \mu X(k) g(k).$$
References
P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical Implementation*, 5th ed., Algorithm 13.4.
99 def __init__( 100 self, 101 filter_order: int = 5, 102 step_size: float = 0.1, 103 memory_length: int = 2, 104 gamma: float = 1e-6, 105 w_init: Optional[Union[np.ndarray, list]] = None, 106 ) -> None: 107 super().__init__(filter_order, w_init=w_init) 108 self.step_size = float(step_size) 109 self.memory_length = int(memory_length) 110 self.gamma = float(gamma) 111 self.n_coeffs = int(filter_order + 1)
def optimize(
    self,
    input_signal: Union[np.ndarray, list],
    desired_signal: Optional[Union[np.ndarray, list]] = None,
    verbose: bool = False,
    return_internal_states: bool = False,
) -> OptimizationResult:
    """
    Execute the blind AP-CM adaptation loop over ``input_signal``.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]``, flattened to shape ``(N,)``.
    desired_signal : None, optional
        Ignored; the reference is the unit-modulus normalization of the
        affine-projection output.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, ``result.extra`` carries ``"last_update_factor"`` and
        ``"last_regressor_matrix"``.

    Returns
    -------
    OptimizationResult
        Scalar outputs/errors are the first components of the
        affine-projection output/error vectors; error_type is
        ``"blind_constant_modulus"``.
    """
    started = time()

    samples = np.asarray(input_signal, dtype=complex).ravel()
    total = int(samples.size)

    out_seq = np.zeros(total, dtype=complex)
    err_seq = np.zeros(total, dtype=complex)

    reuse = int(self.memory_length)
    block = np.zeros((self.n_coeffs, reuse + 1), dtype=complex)
    ridge = (self.gamma * np.eye(reuse + 1)).astype(complex)

    # Prepend zeros so regressors at the start are fully defined.
    history = np.zeros(total + self.filter_order, dtype=complex)
    history[self.filter_order:] = samples

    direction: Optional[np.ndarray] = None

    for k in range(total):
        block[:, 1:] = block[:, :-1]
        block[:, 0] = history[k : k + self.filter_order + 1][::-1]

        y_vec = np.dot(np.conj(block).T, self.w)

        # Unit-modulus reference with a divide-by-zero guard.
        mags = np.abs(y_vec)
        ref_vec = np.zeros_like(y_vec, dtype=complex)
        np.divide(y_vec, mags, out=ref_vec, where=mags > 1e-12)

        err_vec = ref_vec - y_vec

        normal_eq = np.dot(np.conj(block).T, block) + ridge
        direction = np.linalg.solve(normal_eq, err_vec)

        self.w = self.w + self.step_size * np.dot(block, direction)

        out_seq[k] = y_vec[0]
        err_seq[k] = err_vec[0]

        self._record_history()

    runtime_s = float(time() - started)
    if verbose:
        print(f"[AffineProjectionCM] Completed in {runtime_s * 1000:.02f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "last_update_factor": direction,
            "last_regressor_matrix": block.copy(),
        }

    return self._pack_results(
        outputs=out_seq,
        errors=err_seq,
        runtime_s=runtime_s,
        error_type="blind_constant_modulus",
        extra=extra,
    )
Executes the AP-CM adaptation loop over an input sequence.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : None, optional
Ignored. This is a blind algorithm: the reference is derived from
the output via unit-modulus normalization.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes the last internal states in result.extra:
"last_update_factor" (g(k)) and "last_regressor_matrix"
(X(k)).
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Scalar output sequence, y[k] = y_ap(k)[0].
- errors : ndarray of complex, shape (N,)
Scalar CM error sequence, e[k] = e_ap(k)[0].
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "blind_constant_modulus".
- extra : dict, optional
Present only if return_internal_states=True.
class CMA(AdaptiveFilter):
    """
    Constant-Modulus Algorithm (CMA) for blind adaptive filtering (complex-valued).

    Classical CMA(2,2) instantaneous-gradient form after Diniz
    (Alg. 13.2). The equalizer is driven toward an output of
    (approximately) constant modulus, so no desired reference is
    required — useful for blind equalization of constant-envelope and
    near-constant-envelope modulations (e.g., PSK and some QAM regimes).

    Parameters
    ----------
    filter_order : int, optional
        FIR filter order ``M`` (``M + 1`` coefficients). Default is 5.
    step_size : float, optional
        Step size ``mu``. Default is 0.01.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` of shape ``(M + 1,)``;
        zeros when None.

    Notes
    -----
    With regressor ``x_k = [x[k], x[k-1], ..., x[k-M]]^T`` and output
    ``y(k) = w^H(k) x_k``, the CMA(2,2) error and gradient factor are

    .. math::
        e(k) = |y(k)|^2 - R_2, \\qquad \\phi(k) = 2\\, e(k)\\, y^*(k),

    with coefficient update ``w(k+1) = w(k) - mu * phi(k) * x_k``.

    In this implementation the dispersion constant ``R_2`` is estimated
    from the *input* sample moments, ``mean(|x|^4) / mean(|x|^2)``, with
    a small ``safe_eps`` guard against division by zero.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
       Implementation*, 5th ed., Algorithm 13.2.
    """

    supports_complex: bool = True
    step_size: float
    n_coeffs: int

    def __init__(
        self,
        filter_order: int = 5,
        step_size: float = 0.01,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order, w_init=w_init)
        self.step_size = float(step_size)
        self.n_coeffs = int(filter_order + 1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Run the CMA adaptation loop over an input sequence.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]``, flattened to shape ``(N,)``.
        desired_signal : None, optional
            Ignored; CMA is a blind algorithm.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, ``result.extra`` contains the estimated dispersion
            constant and the ``phi(k)`` trajectory.
        safe_eps : float, optional
            Epsilon guarding the R2 moment-ratio estimate. Default 1e-12.

        Returns
        -------
        OptimizationResult
            outputs are ``y[k]``; errors are ``e[k] = |y(k)|^2 - R2``
            (real-valued here); error_type is
            ``"blind_constant_modulus"``.
        """
        tic = time()

        x = np.asarray(input_signal, dtype=complex).ravel()
        n_samples = int(x.size)

        outputs = np.zeros(n_samples, dtype=complex)
        errors = np.zeros(n_samples, dtype=float)

        # Dispersion constant R2 estimated from input sample moments;
        # falls back to 0 for an (effectively) all-zero input.
        power = float(np.mean(np.abs(x) ** 2))
        if power < safe_eps:
            desired_level = 0.0
        else:
            desired_level = float(np.mean(np.abs(x) ** 4) / (power + safe_eps))

        phi_track = np.zeros(n_samples, dtype=complex) if return_internal_states else None

        # Zero-pad so the first regressors are fully defined.
        hist = np.zeros(n_samples + self.filter_order, dtype=complex)
        hist[self.filter_order:] = x

        for k in range(n_samples):
            u_k = hist[k : k + self.filter_order + 1][::-1]

            y = complex(np.dot(np.conj(self.w), u_k))
            outputs[k] = y

            err = float(abs(y) ** 2 - desired_level)
            errors[k] = err

            grad = complex(2.0 * err * y.conjugate())
            if phi_track is not None:
                phi_track[k] = grad

            self.w = self.w - self.step_size * grad * u_k
            self._record_history()

        runtime_s = float(time() - tic)
        if verbose:
            print(f"[CMA] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "dispersion_constant": desired_level,
                "instantaneous_phi": phi_track,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="blind_constant_modulus",
            extra=extra,
        )
Constant-Modulus Algorithm (CMA) for blind adaptive filtering (complex-valued).
The CMA adapts an FIR equalizer to produce an output with (approximately) constant modulus, making it useful for blind equalization of constant-envelope and near-constant-envelope modulations (e.g., PSK and some QAM regimes).
This implementation follows Diniz (Alg. 13.2) using the classical CMA(2,2) instantaneous gradient approximation.
Parameters
filter_order : int, optional
FIR filter order M. The number of coefficients is M + 1.
Default is 5.
step_size : float, optional
Adaptation step size mu. Default is 0.01.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
Let the regressor vector be x_k = [x[k], x[k-1], ..., x[k-M]]^T and the
filter output:
$$y(k) = w^H(k) x_k.$$
CMA(2,2) is commonly derived from minimizing the instantaneous cost:
$$J(k) = \left(|y(k)|^2 - R_2\right)^2,$$
where R2 is the dispersion constant. Using an instantaneous gradient
approximation, define the scalar error:
$$e(k) = |y(k)|^2 - R_2,$$
and the (complex) gradient factor:
$$\phi(k) = 2\, e(k)\, y^*(k).$$
The coefficient update is then:
$$w(k+1) = w(k) - \mu\, \phi(k)\, x_k.$$
Dispersion constant
~~~~~~~~~~~~~~~~~~~
In theory, R2 depends on the source constellation statistics and is
often written as:
$$R_2 = \frac{\mathbb{E}[|s(k)|^4]}{\mathbb{E}[|s(k)|^2]}.$$
In practice, when the source s(k) is not available (blind setting),
R2 is typically chosen from prior knowledge of the modulation or
estimated from a proxy sequence. If this implementation estimates R2
from data, it should specify which sequence is used (e.g., input vs output).
References
def optimize(
    self,
    input_signal: Union[np.ndarray, list],
    desired_signal: Optional[Union[np.ndarray, list]] = None,
    verbose: bool = False,
    return_internal_states: bool = False,
    safe_eps: float = 1e-12,
) -> OptimizationResult:
    """
    Execute the CMA(2,2) adaptation loop over ``input_signal``.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]``, flattened to shape ``(N,)``.
    desired_signal : None, optional
        Ignored; CMA needs no desired reference.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, ``result.extra`` holds the dispersion constant and the
        per-sample ``phi(k)`` trajectory.
    safe_eps : float, optional
        Epsilon guarding the sample-moment estimate of R2. Default 1e-12.

    Returns
    -------
    OptimizationResult
        outputs ``y[k]``, errors ``|y(k)|^2 - R2``, base-class
        coefficient history, error_type ``"blind_constant_modulus"``.
    """
    started = time()

    seq = np.asarray(input_signal, dtype=complex).ravel()
    total = int(seq.size)

    y_out = np.zeros(total, dtype=complex)
    cm_err = np.zeros(total, dtype=float)

    # R2 from input sample moments; zero input yields R2 = 0.
    mean_sq = float(np.mean(np.abs(seq) ** 2))
    r2 = 0.0 if mean_sq < safe_eps else float(np.mean(np.abs(seq) ** 4) / (mean_sq + safe_eps))

    phi_hist: Optional[np.ndarray] = None
    if return_internal_states:
        phi_hist = np.zeros(total, dtype=complex)

    padded = np.zeros(total + self.filter_order, dtype=complex)
    padded[self.filter_order:] = seq

    for k in range(total):
        reg = padded[k : k + self.filter_order + 1][::-1]

        yk = complex(np.dot(np.conj(self.w), reg))
        y_out[k] = yk

        ek = float((np.abs(yk) ** 2) - r2)
        cm_err[k] = ek

        phik = complex(2.0 * ek * np.conj(yk))
        if phi_hist is not None:
            phi_hist[k] = phik

        self.w = self.w - self.step_size * phik * reg
        self._record_history()

    runtime_s = float(time() - started)
    if verbose:
        print(f"[CMA] Completed in {runtime_s * 1000:.02f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "dispersion_constant": r2,
            "instantaneous_phi": phi_hist,
        }

    return self._pack_results(
        outputs=y_out,
        errors=cm_err,
        runtime_s=runtime_s,
        error_type="blind_constant_modulus",
        extra=extra,
    )
Executes the CMA adaptation loop over an input sequence.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : None, optional
Ignored. This is a blind algorithm: it does not require a desired
reference signal.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes internal quantities in result.extra (e.g.,
the dispersion constant R2 and/or the last/trajectory of
phi(k) depending on the implementation).
safe_eps : float, optional
Small epsilon used to avoid division by zero if R2 is estimated
from sample moments. Default is 1e-12.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Output sequence y[k].
- errors : ndarray of float or complex, shape (N,)
CMA error sequence e[k] = |y(k)|^2 - R2 (usually real-valued).
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "blind_constant_modulus".
- extra : dict, optional
Present only if return_internal_states=True.
class Godard(AdaptiveFilter):
    """
    Godard blind adaptive algorithm (complex-valued).

    The Godard criterion generalizes constant-modulus equalization by using
    exponents ``p`` and ``q`` in a family of dispersion-based cost functions.
    It is commonly used for blind channel equalization and includes CMA(2,2)
    as a special case.

    This implementation follows Diniz (Alg. 13.1) and estimates the dispersion
    constant ``R_q`` directly from the *input sequence* via sample moments.

    Parameters
    ----------
    filter_order : int, optional
        FIR filter order ``M``. The number of coefficients is ``M + 1``.
        Default is 5.
    step_size : float, optional
        Adaptation step size ``mu``. Default is 0.01.
    p_exponent : int, optional
        Exponent ``p`` used in the Godard cost / gradient factor. Default is 2.
    q_exponent : int, optional
        Exponent ``q`` used in the modulus term. Default is 2.
    w_init : array_like of complex, optional
        Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None,
        initializes with zeros.

    Notes
    -----
    Let the regressor vector be ``x_k = [x[k], x[k-1], ..., x[k-M]]^T`` and
    the output:

    .. math::
        y(k) = w^H(k) x_k.

    Define the dispersion error (scalar):

    .. math::
        e(k) = |y(k)|^q - R_q.

    The dispersion constant is estimated from the input using sample moments:

    .. math::
        R_q \\approx \\frac{\\mathbb{E}[|x|^{2q}]}{\\mathbb{E}[|x|^q]},

    with a small ``safe_eps`` to prevent division by zero. The instantaneous
    complex gradient factor is:

    .. math::
        \\phi(k) = p\\,q\\, e(k)^{p-1}\\, |y(k)|^{q-2}\\, y^*(k),

    and the coefficient update used here is:

    .. math::
        w(k+1) = w(k) - \\frac{\\mu}{2}\\, \\phi(k)\\, x_k.

    Numerical stability
    ~~~~~~~~~~~~~~~~~~~
    When ``|y(k)|`` is very small, the term ``|y(k)|^{q-2}`` can be
    ill-defined for ``q < 2`` or can amplify noise. This implementation sets
    ``phi(k) = 0`` when ``|y(k)| <= safe_eps``.

    References
    ----------
    .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical
           Implementation*, 5th ed., Algorithm 13.1.
    """

    supports_complex: bool = True
    step_size: float
    p: int
    q: int
    n_coeffs: int

    def __init__(
        self,
        filter_order: int = 5,
        step_size: float = 0.01,
        p_exponent: int = 2,
        q_exponent: int = 2,
        w_init: Optional[Union[np.ndarray, list]] = None,
    ) -> None:
        super().__init__(filter_order, w_init=w_init)
        self.step_size = float(step_size)      # adaptation step mu
        self.p = int(p_exponent)               # Godard exponent p
        self.q = int(q_exponent)               # Godard exponent q
        self.n_coeffs = int(filter_order + 1)  # number of FIR taps (M + 1)

    def optimize(
        self,
        input_signal: Union[np.ndarray, list],
        desired_signal: Optional[Union[np.ndarray, list]] = None,
        verbose: bool = False,
        return_internal_states: bool = False,
        safe_eps: float = 1e-12,
    ) -> OptimizationResult:
        """
        Executes the Godard adaptation loop over an input sequence.

        Parameters
        ----------
        input_signal : array_like of complex
            Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
        desired_signal : None, optional
            Ignored. This is a blind algorithm: no desired reference is used.
        verbose : bool, optional
            If True, prints the total runtime after completion.
        return_internal_states : bool, optional
            If True, includes internal signals in ``result.extra``:
            ``"dispersion_constant"`` (estimated ``R_q``) and
            ``"phi_gradient"`` (trajectory of ``phi(k)`` with shape ``(N,)``).
        safe_eps : float, optional
            Small epsilon used to avoid division by zero when estimating
            ``R_q`` and to gate the computation of ``phi(k)`` when ``|y(k)|``
            is close to zero. Default is 1e-12.

        Returns
        -------
        OptimizationResult
            Result object with fields:
            - outputs : ndarray of complex, shape ``(N,)``
                Output sequence ``y[k]``.
            - errors : ndarray of float, shape ``(N,)``
                Dispersion error sequence ``e[k] = |y(k)|^q - R_q``.
            - coefficients : ndarray of complex
                Coefficient history recorded by the base class.
            - error_type : str
                Set to ``"blind_godard"``.
            - extra : dict, optional
                Present only if ``return_internal_states=True``.
        """
        tic: float = time()

        x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
        n_samples: int = int(x.size)

        # Robustness fix: np.mean over an empty array emits a RuntimeWarning
        # and returns NaN, which would only be masked later by the
        # `den > safe_eps` guard. Compute the moments only when there is data
        # so an empty input cleanly yields R_q = 0.
        if n_samples > 0:
            num: float = float(np.mean(np.abs(x) ** (2 * self.q)))
            den: float = float(np.mean(np.abs(x) ** self.q))
        else:
            num = 0.0
            den = 0.0
        desired_level: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0

        outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
        errors: np.ndarray = np.zeros(n_samples, dtype=float)

        # phi trajectory is only materialized when the caller asks for it.
        phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None

        # Zero-prefix padding provides the implicit x[k] = 0 for k < 0.
        x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
        x_padded[self.filter_order:] = x

        for k in range(n_samples):
            # Regressor x_k = [x[k], x[k-1], ..., x[k-M]]^T (reversed slice).
            x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]

            # y(k) = w^H x_k
            y_k: complex = complex(np.dot(np.conj(self.w), x_k))
            outputs[k] = y_k

            # Dispersion error e(k) = |y(k)|^q - R_q (real-valued).
            e_k: float = float((np.abs(y_k) ** self.q) - desired_level)
            errors[k] = e_k

            # Gradient factor phi(k); gated to 0 near the origin where
            # |y|^{q-2} is ill-defined / noise-amplifying.
            if np.abs(y_k) > safe_eps:
                phi_k: complex = complex(
                    self.p
                    * self.q
                    * (e_k ** (self.p - 1))
                    * (np.abs(y_k) ** (self.q - 2))
                    * np.conj(y_k)
                )
            else:
                phi_k = 0.0 + 0.0j

            if return_internal_states and phi_track is not None:
                phi_track[k] = phi_k

            # w(k+1) = w(k) - (mu/2) phi(k) x_k
            self.w = self.w - (self.step_size * phi_k * x_k) / 2.0
            self._record_history()

        runtime_s: float = float(time() - tic)
        if verbose:
            print(f"[Godard] Completed in {runtime_s * 1000:.02f} ms")

        extra: Optional[Dict[str, Any]] = None
        if return_internal_states:
            extra = {
                "phi_gradient": phi_track,
                "dispersion_constant": desired_level,
            }

        return self._pack_results(
            outputs=outputs,
            errors=errors,
            runtime_s=runtime_s,
            error_type="blind_godard",
            extra=extra,
        )
Godard blind adaptive algorithm (complex-valued).
The Godard criterion generalizes constant-modulus equalization by using
exponents p and q in a family of dispersion-based cost functions.
It is commonly used for blind channel equalization and includes CMA(2,2)
as a special case.
This implementation follows Diniz (Alg. 13.1) and estimates the dispersion
constant R_q directly from the input sequence via sample moments.
Parameters
filter_order : int, optional
FIR filter order M. The number of coefficients is M + 1.
Default is 5.
step_size : float, optional
Adaptation step size mu. Default is 0.01.
p_exponent : int, optional
Exponent p used in the Godard cost / gradient factor. Default is 2.
q_exponent : int, optional
Exponent q used in the modulus term. Default is 2.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
Let the regressor vector be x_k = [x[k], x[k-1], ..., x[k-M]]^T and the
output:
$$y(k) = w^H(k) x_k.$$
Define the dispersion error (scalar):
$$e(k) = |y(k)|^q - R_q.$$
In this implementation, the dispersion constant is estimated from the input using sample moments:
$$R_q \approx \frac{\mathbb{E}[|x|^{2q}]}{\mathbb{E}[|x|^q]} \approx \frac{\frac{1}{N}\sum_k |x(k)|^{2q}} {\frac{1}{N}\sum_k |x(k)|^q},$$
with a small safe_eps to prevent division by zero.
The instantaneous complex gradient factor is computed as:
$$\phi(k) = p\,q\, e(k)^{p-1}\, |y(k)|^{q-2}\, y^*(k),$$
and the coefficient update used here is:
$$w(k+1) = w(k) - \frac{\mu}{2}\, \phi(k)\, x_k.$$
Numerical stability
~~~~~~~~~~~~~~~~~~~
When |y(k)| is very small, the term |y(k)|^{q-2} can be ill-defined
for q < 2 or can amplify noise. This implementation sets phi(k)=0
when |y(k)| <= safe_eps.
References
# NOTE(review): this span is a duplicated extraction of Godard.__init__ —
# the same definition already appears inside the class body earlier in the
# file. Kept token-identical here so the two copies do not diverge.
def __init__(
    self,
    filter_order: int = 5,
    step_size: float = 0.01,
    p_exponent: int = 2,
    q_exponent: int = 2,
    w_init: Optional[Union[np.ndarray, list]] = None,
) -> None:
    """Initialize the Godard equalizer and store its hyper-parameters.

    Coefficient allocation/initialization is delegated to the
    AdaptiveFilter base-class constructor.
    """
    super().__init__(filter_order, w_init=w_init)
    self.step_size = float(step_size)      # adaptation step mu
    self.p = int(p_exponent)               # Godard exponent p
    self.q = int(q_exponent)               # Godard exponent q
    self.n_coeffs = int(filter_order + 1)  # number of FIR taps (M + 1)
# NOTE(review): this span is a duplicated extraction of Godard.optimize —
# the same definition already appears inside the class body earlier in the
# file. Kept token-identical here so the two copies do not diverge.
def optimize(
    self,
    input_signal: Union[np.ndarray, list],
    desired_signal: Optional[Union[np.ndarray, list]] = None,
    verbose: bool = False,
    return_internal_states: bool = False,
    safe_eps: float = 1e-12,
) -> OptimizationResult:
    """
    Executes the Godard adaptation loop over an input sequence.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
    desired_signal : None, optional
        Ignored. This is a blind algorithm: no desired reference is used.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes internal signals in ``result.extra``:
        ``"dispersion_constant"`` (estimated ``R_q``) and ``"phi_gradient"``
        (trajectory of ``phi(k)`` with shape ``(N,)``).
    safe_eps : float, optional
        Small epsilon used to avoid division by zero when estimating
        ``R_q`` and to gate the computation of ``phi(k)`` when ``|y(k)|`` is
        close to zero. Default is 1e-12.

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of complex, shape ``(N,)``
            Output sequence ``y[k]``.
        - errors : ndarray of float, shape ``(N,)``
            Dispersion error sequence ``e[k] = |y(k)|^q - R_q``.
        - coefficients : ndarray of complex
            Coefficient history recorded by the base class.
        - error_type : str
            Set to ``"blind_godard"``.
        - extra : dict, optional
            Present only if ``return_internal_states=True``.
    """
    tic: float = time()

    x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
    n_samples: int = int(x.size)

    # Sample-moment estimate of the dispersion constant R_q.
    num: float = float(np.mean(np.abs(x) ** (2 * self.q)))
    den: float = float(np.mean(np.abs(x) ** self.q))
    desired_level: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0

    outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
    errors: np.ndarray = np.zeros(n_samples, dtype=float)

    # phi trajectory only materialized on request.
    phi_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None

    # Zero-prefix padding provides the implicit x[k] = 0 for k < 0.
    x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
    x_padded[self.filter_order:] = x

    for k in range(n_samples):
        # Regressor x_k = [x[k], x[k-1], ..., x[k-M]]^T (reversed slice).
        x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]

        # y(k) = w^H x_k
        y_k: complex = complex(np.dot(np.conj(self.w), x_k))
        outputs[k] = y_k

        e_k: float = float((np.abs(y_k) ** self.q) - desired_level)
        errors[k] = e_k

        # Gradient factor; gated to 0 near the origin (|y|^{q-2} ill-defined).
        if np.abs(y_k) > safe_eps:
            phi_k: complex = complex(
                self.p
                * self.q
                * (e_k ** (self.p - 1))
                * (np.abs(y_k) ** (self.q - 2))
                * np.conj(y_k)
            )
        else:
            phi_k = 0.0 + 0.0j

        if return_internal_states and phi_track is not None:
            phi_track[k] = phi_k

        # w(k+1) = w(k) - (mu/2) phi(k) x_k
        self.w = self.w - (self.step_size * phi_k * x_k) / 2.0
        self._record_history()

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[Godard] Completed in {runtime_s * 1000:.02f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "phi_gradient": phi_track,
            "dispersion_constant": desired_level,
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="blind_godard",
        extra=extra,
    )
Executes the Godard adaptation loop over an input sequence.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : None, optional
Ignored. This is a blind algorithm: no desired reference is used.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes internal signals in result.extra:
"dispersion_constant" (estimated R_q) and "phi_gradient"
(trajectory of phi(k) with shape (N,)).
safe_eps : float, optional
Small epsilon used to avoid division by zero when estimating
R_q and to gate the computation of phi(k) when |y(k)| is
close to zero. Default is 1e-12.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Output sequence y[k].
- errors : ndarray of float, shape (N,)
Dispersion error sequence e[k] = |y(k)|^q - R_q.
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "blind_godard".
- extra : dict, optional
Present only if return_internal_states=True.
25class Sato(AdaptiveFilter): 26 """ 27 Sato blind adaptive algorithm (complex-valued). 28 29 The Sato criterion is an early blind equalization method particularly 30 associated with multilevel PAM/QAM-type signals. It adapts an FIR equalizer 31 by pulling the output toward a fixed magnitude level through the complex 32 sign function, using a dispersion constant ``zeta``. 33 34 This implementation follows Diniz (Alg. 13.3) and estimates ``zeta`` from 35 the *input sequence* via sample moments. 36 37 Parameters 38 ---------- 39 filter_order : int, optional 40 FIR filter order ``M``. The number of coefficients is ``M + 1``. 41 Default is 5. 42 step_size : float, optional 43 Adaptation step size ``mu``. Default is 0.01. 44 w_init : array_like of complex, optional 45 Initial coefficient vector ``w(0)`` with shape ``(M + 1,)``. If None, 46 initializes with zeros. 47 48 Notes 49 ----- 50 Let the regressor vector be ``x_k = [x[k], x[k-1], ..., x[k-M]]^T`` and the 51 output: 52 53 .. math:: 54 y(k) = w^H(k) x_k. 55 56 Define the complex sign function (unit-circle projection): 57 58 .. math:: 59 \\mathrm{csgn}(y) = 60 \\begin{cases} 61 \\dfrac{y}{|y|}, & |y| > 0 \\\\ 62 0, & |y| = 0 63 \\end{cases} 64 65 The Sato error is: 66 67 .. math:: 68 e(k) = y(k) - \\zeta\\, \\mathrm{csgn}(y(k)). 69 70 The coefficient update used here is: 71 72 .. math:: 73 w(k+1) = w(k) - \\mu\\, e^*(k)\\, x_k. 74 75 Dispersion constant 76 ~~~~~~~~~~~~~~~~~~~ 77 In this implementation, the dispersion constant is estimated from the input 78 using sample moments: 79 80 .. math:: 81 \\zeta \\approx \\frac{\\mathbb{E}[|x|^2]}{\\mathbb{E}[|x|]} 82 \\approx \\frac{\\frac{1}{N}\\sum_k |x(k)|^2} 83 {\\frac{1}{N}\\sum_k |x(k)|}, 84 85 with a small ``safe_eps`` to avoid division by zero. 86 87 Numerical stability 88 ~~~~~~~~~~~~~~~~~~~ 89 To avoid instability when ``|y(k)|`` is very small, this implementation 90 sets ``csgn(y(k)) = 0`` when ``|y(k)| <= safe_eps``. 91 92 References 93 ---------- 94 .. [1] P. 
S. R. Diniz, *Adaptive Filtering: Algorithms and Practical 95 Implementation*, 5th ed., Algorithm 13.3. 96 """ 97 98 supports_complex: bool = True 99 step_size: float 100 n_coeffs: int 101 102 def __init__( 103 self, 104 filter_order: int = 5, 105 step_size: float = 0.01, 106 w_init: Optional[Union[np.ndarray, list]] = None, 107 ) -> None: 108 super().__init__(filter_order, w_init=w_init) 109 self.step_size = float(step_size) 110 self.n_coeffs = int(filter_order + 1) 111 112 def optimize( 113 self, 114 input_signal: Union[np.ndarray, list], 115 desired_signal: Optional[Union[np.ndarray, list]] = None, 116 verbose: bool = False, 117 return_internal_states: bool = False, 118 safe_eps: float = 1e-12, 119 ) -> OptimizationResult: 120 """ 121 Executes the Sato adaptation loop over an input sequence. 122 123 Parameters 124 ---------- 125 input_signal : array_like of complex 126 Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened). 127 desired_signal : None, optional 128 Ignored. This is a blind algorithm: no desired reference is used. 129 verbose : bool, optional 130 If True, prints the total runtime after completion. 131 return_internal_states : bool, optional 132 If True, includes internal signals in ``result.extra``: 133 ``"dispersion_constant"`` (estimated ``zeta``) and 134 ``"sato_sign_track"`` (trajectory of ``csgn(y(k))`` with shape 135 ``(N,)``). 136 safe_eps : float, optional 137 Small epsilon used to avoid division by zero when estimating 138 ``zeta`` and to gate the computation of ``csgn(y(k))`` when ``|y(k)|`` 139 is close to zero. Default is 1e-12. 140 141 Returns 142 ------- 143 OptimizationResult 144 Result object with fields: 145 - outputs : ndarray of complex, shape ``(N,)`` 146 Output sequence ``y[k]``. 147 - errors : ndarray of complex, shape ``(N,)`` 148 Sato error sequence ``e[k] = y(k) - zeta*csgn(y(k))``. 149 - coefficients : ndarray of complex 150 Coefficient history recorded by the base class. 
151 - error_type : str 152 Set to ``"blind_sato"``. 153 - extra : dict, optional 154 Present only if ``return_internal_states=True``. 155 """ 156 tic: float = time() 157 158 x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel() 159 n_samples: int = int(x.size) 160 161 num: float = float(np.mean(np.abs(x) ** 2)) 162 den: float = float(np.mean(np.abs(x))) 163 dispersion_constant: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0 164 165 outputs: np.ndarray = np.zeros(n_samples, dtype=complex) 166 errors: np.ndarray = np.zeros(n_samples, dtype=complex) 167 168 sign_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None 169 170 x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex) 171 x_padded[self.filter_order:] = x 172 173 for k in range(n_samples): 174 x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1] 175 176 y_k: complex = complex(np.dot(np.conj(self.w), x_k)) 177 outputs[k] = y_k 178 179 mag: float = float(np.abs(y_k)) 180 sato_sign: complex = (y_k / mag) if mag > safe_eps else (0.0 + 0.0j) 181 182 if return_internal_states and sign_track is not None: 183 sign_track[k] = sato_sign 184 185 e_k: complex = y_k - sato_sign * dispersion_constant 186 errors[k] = e_k 187 188 self.w = self.w - self.step_size * np.conj(e_k) * x_k 189 self._record_history() 190 191 runtime_s: float = float(time() - tic) 192 if verbose: 193 print(f"[Sato] Completed in {runtime_s * 1000:.02f} ms") 194 195 extra: Optional[Dict[str, Any]] = None 196 if return_internal_states: 197 extra = { 198 "sato_sign_track": sign_track, 199 "dispersion_constant": dispersion_constant, 200 } 201 202 return self._pack_results( 203 outputs=outputs, 204 errors=errors, 205 runtime_s=runtime_s, 206 error_type="blind_sato", 207 extra=extra, 208 )
Sato blind adaptive algorithm (complex-valued).
The Sato criterion is an early blind equalization method particularly
associated with multilevel PAM/QAM-type signals. It adapts an FIR equalizer
by pulling the output toward a fixed magnitude level through the complex
sign function, using a dispersion constant zeta.
This implementation follows Diniz (Alg. 13.3) and estimates zeta from
the input sequence via sample moments.
Parameters
filter_order : int, optional
FIR filter order M. The number of coefficients is M + 1.
Default is 5.
step_size : float, optional
Adaptation step size mu. Default is 0.01.
w_init : array_like of complex, optional
Initial coefficient vector w(0) with shape (M + 1,). If None,
initializes with zeros.
Notes
Let the regressor vector be x_k = [x[k], x[k-1], ..., x[k-M]]^T and the
output:
$$y(k) = w^H(k) x_k.$$
Define the complex sign function (unit-circle projection):
$$\mathrm{csgn}(y) = \begin{cases} \dfrac{y}{|y|}, & |y| > 0 \\ 0, & |y| = 0 \end{cases}$$
The Sato error is:
$$e(k) = y(k) - \zeta\, \mathrm{csgn}(y(k)).$$
The coefficient update used here is:
$$w(k+1) = w(k) - \mu\, e^*(k)\, x_k.$$
Dispersion constant
~~~~~~~~~~~~~~~~~~~
In this implementation, the dispersion constant is estimated from the input
using sample moments:
$$\zeta \approx \frac{\mathbb{E}[|x|^2]}{\mathbb{E}[|x|]} \approx \frac{\frac{1}{N}\sum_k |x(k)|^2} {\frac{1}{N}\sum_k |x(k)|},$$
with a small safe_eps to avoid division by zero.
Numerical stability
~~~~~~~~~~~~~~~~~~~
To avoid instability when |y(k)| is very small, this implementation
sets csgn(y(k)) = 0 when |y(k)| <= safe_eps.
References
# NOTE(review): this span is a duplicated extraction of Sato.optimize —
# the same definition already appears inside the class body earlier in the
# file. Kept token-identical here so the two copies do not diverge.
def optimize(
    self,
    input_signal: Union[np.ndarray, list],
    desired_signal: Optional[Union[np.ndarray, list]] = None,
    verbose: bool = False,
    return_internal_states: bool = False,
    safe_eps: float = 1e-12,
) -> OptimizationResult:
    """
    Executes the Sato adaptation loop over an input sequence.

    Parameters
    ----------
    input_signal : array_like of complex
        Input sequence ``x[k]`` with shape ``(N,)`` (will be flattened).
    desired_signal : None, optional
        Ignored. This is a blind algorithm: no desired reference is used.
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, includes internal signals in ``result.extra``:
        ``"dispersion_constant"`` (estimated ``zeta``) and
        ``"sato_sign_track"`` (trajectory of ``csgn(y(k))`` with shape
        ``(N,)``).
    safe_eps : float, optional
        Small epsilon used to avoid division by zero when estimating
        ``zeta`` and to gate the computation of ``csgn(y(k))`` when
        ``|y(k)|`` is close to zero. Default is 1e-12.

    Returns
    -------
    OptimizationResult
        Result object with fields:
        - outputs : ndarray of complex, shape ``(N,)``
            Output sequence ``y[k]``.
        - errors : ndarray of complex, shape ``(N,)``
            Sato error sequence ``e[k] = y(k) - zeta*csgn(y(k))``.
        - coefficients : ndarray of complex
            Coefficient history recorded by the base class.
        - error_type : str
            Set to ``"blind_sato"``.
        - extra : dict, optional
            Present only if ``return_internal_states=True``.
    """
    tic: float = time()

    x: np.ndarray = np.asarray(input_signal, dtype=complex).ravel()
    n_samples: int = int(x.size)

    # Sample-moment estimate of the dispersion constant zeta.
    num: float = float(np.mean(np.abs(x) ** 2))
    den: float = float(np.mean(np.abs(x)))
    dispersion_constant: float = float(num / (den + safe_eps)) if den > safe_eps else 0.0

    outputs: np.ndarray = np.zeros(n_samples, dtype=complex)
    errors: np.ndarray = np.zeros(n_samples, dtype=complex)

    # csgn trajectory only materialized on request.
    sign_track: Optional[np.ndarray] = np.zeros(n_samples, dtype=complex) if return_internal_states else None

    # Zero-prefix padding provides the implicit x[k] = 0 for k < 0.
    x_padded: np.ndarray = np.zeros(n_samples + self.filter_order, dtype=complex)
    x_padded[self.filter_order:] = x

    for k in range(n_samples):
        # Regressor x_k = [x[k], x[k-1], ..., x[k-M]]^T (reversed slice).
        x_k: np.ndarray = x_padded[k : k + self.filter_order + 1][::-1]

        # y(k) = w^H x_k
        y_k: complex = complex(np.dot(np.conj(self.w), x_k))
        outputs[k] = y_k

        # csgn(y) = y/|y|, gated to 0 near the origin for stability.
        mag: float = float(np.abs(y_k))
        sato_sign: complex = (y_k / mag) if mag > safe_eps else (0.0 + 0.0j)

        if return_internal_states and sign_track is not None:
            sign_track[k] = sato_sign

        e_k: complex = y_k - sato_sign * dispersion_constant
        errors[k] = e_k

        # w(k+1) = w(k) - mu e^*(k) x_k
        self.w = self.w - self.step_size * np.conj(e_k) * x_k
        self._record_history()

    runtime_s: float = float(time() - tic)
    if verbose:
        print(f"[Sato] Completed in {runtime_s * 1000:.02f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        extra = {
            "sato_sign_track": sign_track,
            "dispersion_constant": dispersion_constant,
        }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="blind_sato",
        extra=extra,
    )
Executes the Sato adaptation loop over an input sequence.
Parameters
input_signal : array_like of complex
Input sequence x[k] with shape (N,) (will be flattened).
desired_signal : None, optional
Ignored. This is a blind algorithm: no desired reference is used.
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, includes internal signals in result.extra:
"dispersion_constant" (estimated zeta) and
"sato_sign_track" (trajectory of csgn(y(k)) with shape
(N,)).
safe_eps : float, optional
Small epsilon used to avoid division by zero when estimating
zeta and to gate the computation of csgn(y(k)) when |y(k)|
is close to zero. Default is 1e-12.
Returns
OptimizationResult
Result object with fields:
- outputs : ndarray of complex, shape (N,)
Output sequence y[k].
- errors : ndarray of complex, shape (N,)
Sato error sequence e[k] = y(k) - zeta*csgn(y(k)).
- coefficients : ndarray of complex
Coefficient history recorded by the base class.
- error_type : str
Set to "blind_sato".
- extra : dict, optional
Present only if return_internal_states=True.
28class Kalman(AdaptiveFilter): 29 """ 30 Kalman filter for state estimation (real or complex-valued). 31 32 Implements the discrete-time Kalman filter recursion for linear state-space 33 models with additive process and measurement noise. Matrices may be constant 34 (single ``ndarray``) or time-varying (a sequence of arrays indexed by ``k``). 35 36 The model used is: 37 38 .. math:: 39 x(k) = A(k-1) x(k-1) + B(k) n(k), 40 41 .. math:: 42 y(k) = C^T(k) x(k) + n_1(k), 43 44 where :math:`n(k)` is the process noise with covariance :math:`R_n(k)` and 45 :math:`n_1(k)` is the measurement noise with covariance :math:`R_{n1}(k)`. 46 47 Notes 48 ----- 49 API integration 50 ~~~~~~~~~~~~~~~ 51 This class inherits from :class:`~pydaptivefiltering.base.AdaptiveFilter` to 52 share a common interface. Here, the "weights" are the state estimate: 53 ``self.w`` stores the current state vector (flattened), and 54 ``self.w_history`` stores the covariance matrices over time. 55 56 Time-varying matrices 57 ~~~~~~~~~~~~~~~~~~~~~ 58 Any of ``A``, ``C_T``, ``B``, ``Rn``, ``Rn1`` may be provided either as: 59 - a constant ``ndarray``, used for all k; or 60 - a sequence (list/tuple) of ``ndarray``, where element ``k`` is used at time k. 61 62 Dimensions 63 ~~~~~~~~~~ 64 Let ``n`` be the state dimension, ``p`` the measurement dimension, and ``q`` 65 the process-noise dimension. Then: 66 67 - ``A(k)`` has shape ``(n, n)`` 68 - ``C_T(k)`` has shape ``(p, n)`` (note: this is :math:`C^T`) 69 - ``B(k)`` has shape ``(n, q)`` 70 - ``Rn(k)`` has shape ``(q, q)`` 71 - ``Rn1(k)`` has shape ``(p, p)`` 72 73 If ``B`` is not provided, the implementation uses ``B = I`` (thus ``q = n``), 74 and expects ``Rn`` to be shape ``(n, n)``. 75 76 Parameters 77 ---------- 78 A : ndarray or Sequence[ndarray] 79 State transition matrix :math:`A(k-1)` with shape ``(n, n)``. 80 C_T : ndarray or Sequence[ndarray] 81 Measurement matrix :math:`C^T(k)` with shape ``(p, n)``. 
82 Rn : ndarray or Sequence[ndarray] 83 Process noise covariance :math:`R_n(k)` with shape ``(q, q)``. 84 Rn1 : ndarray or Sequence[ndarray] 85 Measurement noise covariance :math:`R_{n1}(k)` with shape ``(p, p)``. 86 B : ndarray or Sequence[ndarray], optional 87 Process noise input matrix :math:`B(k)` with shape ``(n, q)``. 88 If None, uses identity. 89 x_init : ndarray, optional 90 Initial state estimate :math:`x(0|0)`. Accepts shapes compatible with 91 ``(n,)``, ``(n,1)``, or ``(1,n)``. If None, initializes with zeros. 92 Re_init : ndarray, optional 93 Initial estimation error covariance :math:`R_e(0|0)` with shape ``(n, n)``. 94 If None, initializes with identity. 95 96 References 97 ---------- 98 .. [1] P. S. R. Diniz, *Adaptive Filtering: Algorithms and Practical 99 Implementation*, Algorithm 17.1. 100 """ 101 supports_complex: bool = True 102 103 A: Union[np.ndarray, Sequence[np.ndarray]] 104 C_T: Union[np.ndarray, Sequence[np.ndarray]] 105 Rn: Union[np.ndarray, Sequence[np.ndarray]] 106 Rn1: Union[np.ndarray, Sequence[np.ndarray]] 107 B: Optional[Union[np.ndarray, Sequence[np.ndarray]]] 108 109 x: np.ndarray 110 Re: np.ndarray 111 112 def __init__( 113 self, 114 A: Union[np.ndarray, Sequence[np.ndarray]], 115 C_T: Union[np.ndarray, Sequence[np.ndarray]], 116 Rn: Union[np.ndarray, Sequence[np.ndarray]], 117 Rn1: Union[np.ndarray, Sequence[np.ndarray]], 118 B: Optional[Union[np.ndarray, Sequence[np.ndarray]]] = None, 119 x_init: Optional[np.ndarray] = None, 120 Re_init: Optional[np.ndarray] = None, 121 ) -> None: 122 A0 = mat_at_k(A, 0) 123 if A0.ndim != 2 or A0.shape[0] != A0.shape[1]: 124 raise ValueError(f"A must be square (n,n). 
Got {A0.shape}.") 125 n = int(A0.shape[0]) 126 127 super().__init__(filter_order=n - 1, w_init=None) 128 129 self.A = A 130 self.C_T = C_T 131 self.Rn = Rn 132 self.Rn1 = Rn1 133 self.B = B 134 135 dtype = np.result_type( 136 A0, mat_at_k(C_T, 0), mat_at_k(Rn, 0), mat_at_k(Rn1, 0) 137 ) 138 dtype = np.float64 if np.issubdtype(dtype, np.floating) else np.complex128 139 140 self._dtype = dtype 141 self.regressor = np.zeros(self.filter_order + 1, dtype=self._dtype) 142 self.w = np.zeros(self.filter_order + 1, dtype=self._dtype) 143 144 if x_init is None: 145 x0 = np.zeros((n, 1), dtype=dtype) 146 else: 147 x0 = as_2d_col(np.asarray(x_init, dtype=dtype)) 148 if x0.shape[0] != n: 149 raise ValueError(f"x_init must have length n={n}. Got {x0.shape}.") 150 self.x = x0 151 152 if Re_init is None: 153 Re0 = np.eye(n, dtype=dtype) 154 else: 155 Re0 = np.asarray(Re_init, dtype=dtype) 156 if Re0.shape != (n, n): 157 raise ValueError(f"Re_init must be shape (n,n)={(n,n)}. Got {Re0.shape}.") 158 self.Re = Re0 159 160 self.w = self.x[:, 0].copy() 161 self.w_history = [] 162 163 def _validate_step_shapes( 164 self, 165 A: np.ndarray, 166 C_T: np.ndarray, 167 Rn: np.ndarray, 168 Rn1: np.ndarray, 169 B: np.ndarray, 170 ) -> None: 171 n = int(self.x.shape[0]) 172 if A.shape != (n, n): 173 raise ValueError(f"A(k) must be {(n,n)}. Got {A.shape}.") 174 if C_T.ndim != 2 or C_T.shape[1] != n: 175 raise ValueError(f"C_T(k) must be (p,n) with n={n}. Got {C_T.shape}.") 176 p = int(C_T.shape[0]) 177 if Rn1.shape != (p, p): 178 raise ValueError(f"Rn1(k) must be {(p,p)}. Got {Rn1.shape}.") 179 if B.ndim != 2 or B.shape[0] != n: 180 raise ValueError(f"B(k) must be (n,q) with n={n}. Got {B.shape}.") 181 q = int(B.shape[1]) 182 if Rn.shape != (q, q): 183 raise ValueError(f"Rn(k) must be {(q,q)}. 
Got {Rn.shape}.") 184 185 def step( 186 self, 187 y_k: ArrayLike, 188 *, 189 k: int, 190 safe_eps: float = 1e-12, 191 ) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]: 192 """ 193 Perform ONE Kalman update step using measurement y(k). 194 195 This enables online / decision-directed usage, while still supporting 196 time-varying matrices via the global index `k`. 197 198 Parameters 199 ---------- 200 y_k : array_like 201 Measurement at time k. Accepted shapes: 202 - scalar (for p=1) 203 - (p,), (p,1), (1,p) 204 k : int 205 Global iteration index (used to index time-varying matrices). 206 safe_eps : float 207 Regularization epsilon used if innovation covariance solve fails. 208 209 Returns 210 ------- 211 x_hat : ndarray 212 Posterior state estimate x(k|k), shape (n,). 213 innovation : ndarray 214 Innovation v(k) = y(k) - C^T(k) x(k|k-1), shape (p,). 215 extra_step : dict 216 Contains per-step internal matrices: 217 - "kalman_gain": K (n,p) 218 - "predicted_state": x_pred (n,) 219 - "predicted_cov": Re_pred (n,n) 220 - "innovation_cov": S (p,p) 221 """ 222 A_k = np.asarray(mat_at_k(self.A, k), dtype=self._dtype) 223 C_T_k = np.asarray(mat_at_k(self.C_T, k), dtype=self._dtype) 224 Rn_k = np.asarray(mat_at_k(self.Rn, k), dtype=self._dtype) 225 Rn1_k = np.asarray(mat_at_k(self.Rn1, k), dtype=self._dtype) 226 227 n = int(self.x.shape[0]) 228 I_n = np.eye(n, dtype=self._dtype) 229 230 B_k = np.asarray( 231 mat_at_k(self.B, k) if self.B is not None else I_n, 232 dtype=self._dtype, 233 ) 234 235 self._validate_step_shapes(A_k, C_T_k, Rn_k, Rn1_k, B_k) 236 237 # Normalize y_k into (p,1) 238 y_vec = as_2d_col(np.asarray(y_k, dtype=self._dtype).ravel()) 239 240 C_k = C_T_k.conj().T # (n,p) 241 242 # Predict 243 x_pred = A_k @ self.x 244 Re_pred = (A_k @ self.Re @ A_k.conj().T) + (B_k @ Rn_k @ B_k.conj().T) 245 246 # Innovation 247 e_k = y_vec - (C_T_k @ x_pred) # (p,1) 248 S = (C_T_k @ Re_pred @ C_k) + Rn1_k # (p,p) 249 250 RC = Re_pred @ C_k # (n,p) 251 252 # Gain: solve 
S^T * K^T = (RC)^T, robust to singularities 253 p_dim = int(C_T_k.shape[0]) 254 try: 255 K = np.linalg.solve(S.conj().T, RC.conj().T).conj().T 256 except np.linalg.LinAlgError: 257 S_reg = S + (safe_eps * np.eye(p_dim, dtype=self._dtype)) 258 K = np.linalg.solve(S_reg.conj().T, RC.conj().T).conj().T 259 260 # Update 261 self.x = x_pred + (K @ e_k) 262 self.Re = (I_n - (K @ C_T_k)) @ Re_pred 263 264 # Keep AdaptiveFilter "weights" in sync 265 self.w = self.x[:, 0].copy() 266 267 x_hat = self.x[:, 0].copy() 268 innovation = e_k[:, 0].copy() 269 270 extra_step = { 271 "kalman_gain": K, 272 "predicted_state": x_pred[:, 0].copy(), 273 "predicted_cov": Re_pred.copy(), 274 "innovation_cov": S.copy(), 275 } 276 277 return x_hat, innovation, extra_step 278 279 def optimize( 280 self, 281 input_signal: ArrayLike, 282 desired_signal: Optional[ArrayLike] = None, 283 verbose: bool = False, 284 return_internal_states: bool = False, 285 safe_eps: float = 1e-12, 286 ) -> OptimizationResult: 287 """ 288 Executes the Kalman recursion for a sequence of measurements ``y[k]``. 289 290 Parameters 291 ---------- 292 input_signal : array_like 293 Measurement sequence ``y[k]``. Accepted shapes: 294 - ``(N,)`` for scalar measurements 295 - ``(N, p)`` for p-dimensional measurements 296 - ``(N, p, 1)`` also accepted (squeezed to ``(N, p)``) 297 desired_signal : array_like, optional 298 Ignored (kept only for API standardization). 299 verbose : bool, optional 300 If True, prints the total runtime after completion. 301 return_internal_states : bool, optional 302 If True, returns selected internal values in ``result.extra``. 303 safe_eps : float, optional 304 Small positive value used to regularize the innovation covariance 305 matrix if a linear solve fails (numerical stabilization). 306 307 Returns 308 ------- 309 OptimizationResult 310 outputs : ndarray 311 State estimates ``x(k|k)``, shape ``(N, n)``. 312 errors : ndarray 313 Innovations ``v(k) = y(k) - C^T(k) x(k|k-1)``, shape ``(N, p)``. 
314 coefficients : ndarray 315 Covariance history ``R_e(k|k)``, shape ``(N, n, n)``. 316 error_type : str 317 ``"innovation"``. 318 extra : dict, optional 319 Present only if ``return_internal_states=True``. See below. 320 321 Extra (when return_internal_states=True) 322 -------------------------------------- 323 kalman_gain_last : ndarray 324 Kalman gain ``K`` at the last iteration, shape ``(n, p)``. 325 predicted_state_last : ndarray 326 Predicted state ``x(k|k-1)`` at the last iteration, shape ``(n,)``. 327 predicted_cov_last : ndarray 328 Predicted covariance ``R_e(k|k-1)`` at the last iteration, shape ``(n, n)``. 329 innovation_cov_last : ndarray 330 Innovation covariance ``S`` at the last iteration, shape ``(p, p)``. 331 safe_eps : float 332 The stabilization epsilon used when regularizing ``S``. 333 """ 334 t0 = perf_counter() 335 336 y_mat = as_meas_matrix(np.asarray(input_signal)) 337 y_mat = y_mat.astype(self._dtype, copy=False) 338 339 N = int(y_mat.shape[0]) 340 n = int(self.x.shape[0]) 341 p_dim = int(y_mat.shape[1]) 342 343 outputs = np.zeros((N, n), dtype=self._dtype) 344 errors = np.zeros((N, p_dim), dtype=self._dtype) 345 346 self.w_history = [] 347 348 last_K: Optional[np.ndarray] = None 349 last_x_pred: Optional[np.ndarray] = None 350 last_Re_pred: Optional[np.ndarray] = None 351 last_S: Optional[np.ndarray] = None 352 353 for k in range(N): 354 x_hat, innov, extra_step = self.step(y_mat[k], k=k, safe_eps=safe_eps) 355 356 outputs[k, :] = x_hat 357 errors[k, :] = innov 358 359 self.w_history.append(self.Re.copy()) 360 361 last_K = extra_step["kalman_gain"] 362 last_x_pred = extra_step["predicted_state"].reshape(-1, 1) 363 last_Re_pred = extra_step["predicted_cov"] 364 last_S = extra_step["innovation_cov"] 365 366 runtime_s = float(perf_counter() - t0) 367 if verbose: 368 print(f"[Kalman] Completed in {runtime_s * 1000:.03f} ms") 369 370 extra: Optional[Dict[str, Any]] = None 371 if return_internal_states: 372 extra = { 373 "kalman_gain_last": 
last_K, 374 "predicted_state_last": None if last_x_pred is None else last_x_pred[:, 0].copy(), 375 "predicted_cov_last": last_Re_pred, 376 "innovation_cov_last": last_S, 377 "safe_eps": float(safe_eps), 378 } 379 380 return self._pack_results( 381 outputs=outputs, 382 errors=errors, 383 runtime_s=runtime_s, 384 error_type="innovation", 385 extra=extra, 386 )
Kalman filter for state estimation (real or complex-valued).
Implements the discrete-time Kalman filter recursion for linear state-space
models with additive process and measurement noise. Matrices may be constant
(single ndarray) or time-varying (a sequence of arrays indexed by k).
The model used is:
$$x(k) = A(k-1) x(k-1) + B(k) n(k),$$
$$y(k) = C^T(k) x(k) + n_1(k),$$
where \( n(k) \) is the process noise with covariance \( R_n(k) \) and \( n_1(k) \) is the measurement noise with covariance \( R_{n1}(k) \).
Notes
-----
API integration
~~~~~~~~~~~~~~~
This class inherits from :class:`~pydaptivefiltering.base.AdaptiveFilter` to
share a common interface. Here, the "weights" are the state estimate:
``self.w`` stores the current state vector (flattened), and
``self.w_history`` stores the covariance matrices over time.
Time-varying matrices
~~~~~~~~~~~~~~~~~~~~~
Any of ``A``, ``C_T``, ``B``, ``Rn``, ``Rn1`` may be provided either as:
- a constant ndarray, used for all k; or
- a sequence (list/tuple) of ndarray, where element ``k`` is used at time k.
Dimensions
~~~~~~~~~~
Let n be the state dimension, p the measurement dimension, and q
the process-noise dimension. Then:
- ``A(k)`` has shape ``(n, n)``
- ``C_T(k)`` has shape ``(p, n)`` (note: this is \( C^T \))
- ``B(k)`` has shape ``(n, q)``
- ``Rn(k)`` has shape ``(q, q)``
- ``Rn1(k)`` has shape ``(p, p)``
If B is not provided, the implementation uses B = I (thus q = n),
and expects Rn to be shape (n, n).
Parameters
A : ndarray or Sequence[ndarray]
State transition matrix \( A(k-1) \) with shape (n, n).
C_T : ndarray or Sequence[ndarray]
Measurement matrix \( C^T(k) \) with shape (p, n).
Rn : ndarray or Sequence[ndarray]
Process noise covariance \( R_n(k) \) with shape (q, q).
Rn1 : ndarray or Sequence[ndarray]
Measurement noise covariance \( R_{n1}(k) \) with shape (p, p).
B : ndarray or Sequence[ndarray], optional
Process noise input matrix \( B(k) \) with shape (n, q).
If None, uses identity.
x_init : ndarray, optional
Initial state estimate \( x(0|0) \). Accepts shapes compatible with
(n,), (n,1), or (1,n). If None, initializes with zeros.
Re_init : ndarray, optional
Initial estimation error covariance \( R_e(0|0) \) with shape (n, n).
If None, initializes with identity.
References
def __init__(
    self,
    A: Union[np.ndarray, Sequence[np.ndarray]],
    C_T: Union[np.ndarray, Sequence[np.ndarray]],
    Rn: Union[np.ndarray, Sequence[np.ndarray]],
    Rn1: Union[np.ndarray, Sequence[np.ndarray]],
    B: Optional[Union[np.ndarray, Sequence[np.ndarray]]] = None,
    x_init: Optional[np.ndarray] = None,
    Re_init: Optional[np.ndarray] = None,
) -> None:
    """
    Initialize state x(0|0), covariance R_e(0|0) and the model matrices.

    Parameters
    ----------
    A, C_T, Rn, Rn1, B : ndarray or Sequence[ndarray]
        State-space model matrices; each may be a constant array used for
        all k, or a sequence indexed by k (see the class docstring for the
        expected shapes).
    x_init : ndarray, optional
        Initial state estimate x(0|0); zeros if None.
    Re_init : ndarray, optional
        Initial estimation-error covariance R_e(0|0); identity if None.

    Raises
    ------
    ValueError
        If A(0) is not square, or x_init / Re_init have incompatible shapes.
    """
    A0 = mat_at_k(A, 0)
    if A0.ndim != 2 or A0.shape[0] != A0.shape[1]:
        raise ValueError(f"A must be square (n,n). Got {A0.shape}.")
    n = int(A0.shape[0])

    # The AdaptiveFilter base treats the state vector as the "weights":
    # n coefficients correspond to filter_order = n - 1.
    super().__init__(filter_order=n - 1, w_init=None)

    self.A = A
    self.C_T = C_T
    self.Rn = Rn
    self.Rn1 = Rn1
    self.B = B

    # Working dtype is promoted from the k=0 model matrices.
    # FIX: previously every non-floating dtype fell through to complex128,
    # so pure-integer model matrices silently produced a complex state.
    # Only genuinely complex inputs should map to complex128; everything
    # else (floats AND integers) maps to float64.
    dtype = np.result_type(
        A0, mat_at_k(C_T, 0), mat_at_k(Rn, 0), mat_at_k(Rn1, 0)
    )
    dtype = (
        np.complex128
        if np.issubdtype(dtype, np.complexfloating)
        else np.float64
    )

    self._dtype = dtype
    self.regressor = np.zeros(self.filter_order + 1, dtype=self._dtype)
    self.w = np.zeros(self.filter_order + 1, dtype=self._dtype)

    if x_init is None:
        x0 = np.zeros((n, 1), dtype=dtype)
    else:
        x0 = as_2d_col(np.asarray(x_init, dtype=dtype))
        if x0.shape[0] != n:
            raise ValueError(f"x_init must have length n={n}. Got {x0.shape}.")
    self.x = x0

    if Re_init is None:
        Re0 = np.eye(n, dtype=dtype)
    else:
        Re0 = np.asarray(Re_init, dtype=dtype)
        if Re0.shape != (n, n):
            raise ValueError(f"Re_init must be shape (n,n)={(n,n)}. Got {Re0.shape}.")
    self.Re = Re0

    # Mirror the state into the AdaptiveFilter weight attributes.
    self.w = self.x[:, 0].copy()
    self.w_history = []
def step(
    self,
    y_k: ArrayLike,
    *,
    k: int,
    safe_eps: float = 1e-12,
) -> Tuple[np.ndarray, np.ndarray, Dict[str, Any]]:
    """
    Perform ONE Kalman update step using measurement y(k).

    This enables online / decision-directed usage, while still supporting
    time-varying matrices via the global index `k`.

    Parameters
    ----------
    y_k : array_like
        Measurement at time k. Accepted shapes:
        - scalar (for p=1)
        - (p,), (p,1), (1,p)
    k : int
        Global iteration index (used to index time-varying matrices).
    safe_eps : float
        Regularization epsilon used if innovation covariance solve fails.

    Returns
    -------
    x_hat : ndarray
        Posterior state estimate x(k|k), shape (n,).
    innovation : ndarray
        Innovation v(k) = y(k) - C^T(k) x(k|k-1), shape (p,).
    extra_step : dict
        Contains per-step internal matrices:
        - "kalman_gain": K (n,p)
        - "predicted_state": x_pred (n,)
        - "predicted_cov": Re_pred (n,n)
        - "innovation_cov": S (p,p)

    Raises
    ------
    ValueError
        If the model matrices at time k, or y_k, have incompatible shapes.
    """
    A_k = np.asarray(mat_at_k(self.A, k), dtype=self._dtype)
    C_T_k = np.asarray(mat_at_k(self.C_T, k), dtype=self._dtype)
    Rn_k = np.asarray(mat_at_k(self.Rn, k), dtype=self._dtype)
    Rn1_k = np.asarray(mat_at_k(self.Rn1, k), dtype=self._dtype)

    n = int(self.x.shape[0])
    I_n = np.eye(n, dtype=self._dtype)

    # B defaults to the identity (process noise enters every state).
    B_k = np.asarray(
        mat_at_k(self.B, k) if self.B is not None else I_n,
        dtype=self._dtype,
    )

    self._validate_step_shapes(A_k, C_T_k, Rn_k, Rn1_k, B_k)

    p_dim = int(C_T_k.shape[0])

    # Normalize y_k into (p,1)
    y_vec = as_2d_col(np.asarray(y_k, dtype=self._dtype).ravel())
    # FIX: guard against silent NumPy broadcasting when y_k has the wrong
    # length (e.g. p == 1 but len(y_k) > 1 would previously broadcast the
    # innovation instead of failing).
    if y_vec.shape[0] != p_dim:
        raise ValueError(f"y_k must have length p={p_dim}. Got {y_vec.shape[0]}.")

    C_k = C_T_k.conj().T  # (n,p)

    # Predict
    x_pred = A_k @ self.x
    Re_pred = (A_k @ self.Re @ A_k.conj().T) + (B_k @ Rn_k @ B_k.conj().T)

    # Innovation
    e_k = y_vec - (C_T_k @ x_pred)  # (p,1)
    S = (C_T_k @ Re_pred @ C_k) + Rn1_k  # (p,p)

    RC = Re_pred @ C_k  # (n,p)

    # Gain: solve S^T * K^T = (RC)^T, robust to singularities
    try:
        K = np.linalg.solve(S.conj().T, RC.conj().T).conj().T
    except np.linalg.LinAlgError:
        S_reg = S + (safe_eps * np.eye(p_dim, dtype=self._dtype))
        K = np.linalg.solve(S_reg.conj().T, RC.conj().T).conj().T

    # Update
    self.x = x_pred + (K @ e_k)
    self.Re = (I_n - (K @ C_T_k)) @ Re_pred

    # Keep AdaptiveFilter "weights" in sync
    self.w = self.x[:, 0].copy()

    x_hat = self.x[:, 0].copy()
    innovation = e_k[:, 0].copy()

    extra_step = {
        "kalman_gain": K,
        "predicted_state": x_pred[:, 0].copy(),
        "predicted_cov": Re_pred.copy(),
        "innovation_cov": S.copy(),
    }

    return x_hat, innovation, extra_step
Perform ONE Kalman update step using measurement y(k).
This enables online / decision-directed usage, while still supporting
time-varying matrices via the global index k.
Parameters
----------
y_k : array_like
    Measurement at time k. Accepted shapes:
    - scalar (for p=1)
    - (p,), (p,1), (1,p)
k : int
    Global iteration index (used to index time-varying matrices).
safe_eps : float
    Regularization epsilon used if innovation covariance solve fails.

Returns
-------
x_hat : ndarray
    Posterior state estimate x(k|k), shape (n,).
innovation : ndarray
    Innovation v(k) = y(k) - C^T(k) x(k|k-1), shape (p,).
extra_step : dict
    Contains per-step internal matrices:
    - "kalman_gain": K (n,p)
    - "predicted_state": x_pred (n,)
    - "predicted_cov": Re_pred (n,n)
    - "innovation_cov": S (p,p)
def optimize(
    self,
    input_signal: ArrayLike,
    desired_signal: Optional[ArrayLike] = None,
    verbose: bool = False,
    return_internal_states: bool = False,
    safe_eps: float = 1e-12,
) -> OptimizationResult:
    """
    Run the full Kalman recursion over a measurement sequence ``y[k]``.

    Parameters
    ----------
    input_signal : array_like
        Measurement sequence ``y[k]``. Accepted shapes:
        - ``(N,)`` for scalar measurements
        - ``(N, p)`` for p-dimensional measurements
        - ``(N, p, 1)`` also accepted (squeezed to ``(N, p)``)
    desired_signal : array_like, optional
        Ignored (kept only for API standardization).
    verbose : bool, optional
        If True, prints the total runtime after completion.
    return_internal_states : bool, optional
        If True, returns selected internal values in ``result.extra``.
    safe_eps : float, optional
        Small positive value used to regularize the innovation covariance
        matrix if a linear solve fails (numerical stabilization).

    Returns
    -------
    OptimizationResult
        ``outputs`` holds the state estimates ``x(k|k)`` with shape
        ``(N, n)``; ``errors`` holds the innovations
        ``v(k) = y(k) - C^T(k) x(k|k-1)`` with shape ``(N, p)``;
        ``coefficients`` is the covariance history ``R_e(k|k)`` with shape
        ``(N, n, n)``; ``error_type`` is ``"innovation"``. When
        ``return_internal_states=True``, ``extra`` additionally carries
        the last iteration's ``kalman_gain_last`` (n, p),
        ``predicted_state_last`` (n,), ``predicted_cov_last`` (n, n),
        ``innovation_cov_last`` (p, p), and the ``safe_eps`` used.
    """
    t0 = perf_counter()

    measurements = as_meas_matrix(np.asarray(input_signal))
    measurements = measurements.astype(self._dtype, copy=False)

    num_steps = int(measurements.shape[0])
    state_dim = int(self.x.shape[0])
    meas_dim = int(measurements.shape[1])

    outputs = np.zeros((num_steps, state_dim), dtype=self._dtype)
    errors = np.zeros((num_steps, meas_dim), dtype=self._dtype)

    # w_history tracks the posterior covariance R_e(k|k) at every step.
    self.w_history = []

    # Internal matrices of the most recent step (None if N == 0).
    final_internals: Optional[Dict[str, Any]] = None

    for step_idx in range(num_steps):
        x_hat, innov, internals = self.step(
            measurements[step_idx], k=step_idx, safe_eps=safe_eps
        )
        outputs[step_idx, :] = x_hat
        errors[step_idx, :] = innov
        self.w_history.append(self.Re.copy())
        final_internals = internals

    runtime_s = float(perf_counter() - t0)
    if verbose:
        print(f"[Kalman] Completed in {runtime_s * 1000:.03f} ms")

    extra: Optional[Dict[str, Any]] = None
    if return_internal_states:
        if final_internals is None:
            # Empty input: report placeholders so the keys are stable.
            extra = {
                "kalman_gain_last": None,
                "predicted_state_last": None,
                "predicted_cov_last": None,
                "innovation_cov_last": None,
                "safe_eps": float(safe_eps),
            }
        else:
            extra = {
                "kalman_gain_last": final_internals["kalman_gain"],
                "predicted_state_last": final_internals["predicted_state"].copy(),
                "predicted_cov_last": final_internals["predicted_cov"],
                "innovation_cov_last": final_internals["innovation_cov"],
                "safe_eps": float(safe_eps),
            }

    return self._pack_results(
        outputs=outputs,
        errors=errors,
        runtime_s=runtime_s,
        error_type="innovation",
        extra=extra,
    )
Executes the Kalman recursion for a sequence of measurements y[k].
Parameters
input_signal : array_like
Measurement sequence y[k]. Accepted shapes:
- (N,) for scalar measurements
- (N, p) for p-dimensional measurements
- (N, p, 1) also accepted (squeezed to (N, p))
desired_signal : array_like, optional
Ignored (kept only for API standardization).
verbose : bool, optional
If True, prints the total runtime after completion.
return_internal_states : bool, optional
If True, returns selected internal values in result.extra.
safe_eps : float, optional
Small positive value used to regularize the innovation covariance
matrix if a linear solve fails (numerical stabilization).
Returns
OptimizationResult
outputs : ndarray
State estimates x(k|k), shape (N, n).
errors : ndarray
Innovations v(k) = y(k) - C^T(k) x(k|k-1), shape (N, p).
coefficients : ndarray
Covariance history R_e(k|k), shape (N, n, n).
error_type : str
"innovation".
extra : dict, optional
Present only if return_internal_states=True. See below.
Extra (when return_internal_states=True)
kalman_gain_last : ndarray
Kalman gain K at the last iteration, shape (n, p).
predicted_state_last : ndarray
Predicted state x(k|k-1) at the last iteration, shape (n,).
predicted_cov_last : ndarray
Predicted covariance R_e(k|k-1) at the last iteration, shape (n, n).
innovation_cov_last : ndarray
Innovation covariance S at the last iteration, shape (p, p).
safe_eps : float
The stabilization epsilon used when regularizing S.
def info():
    """Print an overview of the algorithm coverage of the library."""
    print("\n" + "="*70)
    print(" PyDaptive Filtering - Complete Library Overview")
    print(" Reference: 'Adaptive Filtering' by Paulo S. R. Diniz")
    print("="*70)
    sections = {
        "Cap 3/4 (LMS)": "LMS, NLMS, Affine Projection, Sign Algorithms, Transform Domain",
        "Cap 5 (RLS)": "Standard RLS, Alternative RLS",
        "Cap 6 (Set-Membership)": "SM-NLMS, BNLMS, SM-AP, Simplified AP/PUAP",
        "Cap 7 (Lattice RLS)": "LRLS (Posteriori, Priori, Error Feedback), NLRLS",
        "Cap 8 (Fast RLS)": "Fast Transversal RLS, Stabilized FTRLS",
        "Cap 9 (QR)": "QR-Decomposition Based RLS",
        # FIX: typo "Steinglitz-McBride" -> "Steiglitz-McBride"
        # (consistent with the exported SteiglitzMcBride class).
        "Cap 10 (IIR)": "Error Equation, Gauss-Newton, Steiglitz-McBride, RLS-IIR",
        "Cap 11 (Nonlinear)": "Volterra (LMS/RLS), MLP, RBF, Bilinear RLS",
        "Cap 12 (Subband)": "CFDLMS, DLCLLMS, OLSBLMS",
        "Cap 13 (Blind)": "CMA, Godard, Sato, Blind Affine Projection",
        "Cap 17 (Kalman)": "Kalman Filter",
    }
    for cap, algs in sections.items():
        print(f"\n{cap:25}: {algs}")

    print("\n" + "-"*70)
    print("Usage example: from pydaptivefiltering import LMS")
    print("Documentation: help(pydaptivefiltering.LMS)")
    print("="*70 + "\n")
Imprime informações sobre a cobertura de algoritmos da biblioteca.